path: root/vendor/github.com/tangerine-network/mcl/src/asm
Diffstat (limited to 'vendor/github.com/tangerine-network/mcl/src/asm')
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/aarch64.s        13197
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/arm.s            84189
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/low_arm.s          154
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/low_x86-64.asm      153
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/low_x86.asm           0
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/x86-64.bmi2.s     14155
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/x86-64.s          16652
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/x86-64mac.bmi2.s  13830
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/x86-64mac.s       16313
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/x86.bmi2.s        71547
-rw-r--r--  vendor/github.com/tangerine-network/mcl/src/asm/x86.s             73785
11 files changed, 303975 insertions(+), 0 deletions(-)
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/aarch64.s b/vendor/github.com/tangerine-network/mcl/src/asm/aarch64.s
new file mode 100644
index 000000000..a49a36e3a
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/aarch64.s
@@ -0,0 +1,13197 @@
+ .text
+ .file "<stdin>"
+ .globl makeNIST_P192L
+ .align 2
+ .type makeNIST_P192L,@function
+makeNIST_P192L: // @makeNIST_P192L
+// BB#0:
+ movn x0, #0
+ orr x1, xzr, #0xfffffffffffffffe
+ movn x2, #0
+ ret
+.Lfunc_end0:
+ .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L
+
+ .globl mcl_fpDbl_mod_NIST_P192L
+ .align 2
+ .type mcl_fpDbl_mod_NIST_P192L,@function
+mcl_fpDbl_mod_NIST_P192L: // @mcl_fpDbl_mod_NIST_P192L
+// BB#0:
+ ldp x8, x9, [x1, #16]
+ ldp x10, x11, [x1, #32]
+ ldp x12, x13, [x1]
+ orr w14, wzr, #0x1
+ adds x13, x11, x13
+ adcs x8, x8, xzr
+ adcs x15, xzr, xzr
+ adds x12, x12, x9
+ adcs x13, x13, x10
+ adcs x8, x8, x11
+ adcs x15, x15, xzr
+ adds x11, x12, x11
+ movn x12, #0
+ adcs x9, x13, x9
+ adcs x8, x8, x10
+ adcs x10, x15, xzr
+ adds x11, x10, x11
+ adcs x9, x10, x9
+ adcs x8, x8, xzr
+ adcs x10, xzr, xzr
+ adds x13, x11, #1 // =1
+ adcs x14, x9, x14
+ adcs x15, x8, xzr
+ adcs x10, x10, x12
+ tst x10, #0x1
+ csel x10, x11, x13, ne
+ csel x9, x9, x14, ne
+ csel x8, x8, x15, ne
+ stp x10, x9, [x0]
+ str x8, [x0, #16]
+ ret
+.Lfunc_end1:
+ .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L
+
+ .globl mcl_fp_sqr_NIST_P192L
+ .align 2
+ .type mcl_fp_sqr_NIST_P192L,@function
+mcl_fp_sqr_NIST_P192L: // @mcl_fp_sqr_NIST_P192L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldr x10, [x1, #16]
+ orr w11, wzr, #0x1
+ umulh x12, x8, x8
+ mul x13, x9, x8
+ mul x14, x10, x8
+ umulh x15, x9, x8
+ adds x12, x12, x13
+ umulh x16, x10, x8
+ adcs x17, x15, x14
+ adcs x18, x16, xzr
+ mul x1, x9, x9
+ mul x2, x10, x9
+ adds x15, x15, x1
+ umulh x1, x9, x9
+ umulh x9, x10, x9
+ adcs x1, x1, x2
+ adcs x3, x9, xzr
+ adds x12, x13, x12
+ adcs x13, x15, x17
+ adcs x15, x1, x18
+ movn x17, #0
+ umulh x18, x10, x10
+ mul x10, x10, x10
+ mul x8, x8, x8
+ adcs x1, x3, xzr
+ adds x16, x16, x2
+ adcs x9, x9, x10
+ adcs x10, x18, xzr
+ adds x13, x14, x13
+ adcs x14, x16, x15
+ adcs x9, x9, x1
+ adcs x10, x10, xzr
+ adds x12, x12, x10
+ adcs x13, x13, xzr
+ adcs x15, xzr, xzr
+ adds x8, x8, x14
+ adcs x12, x12, x9
+ adcs x13, x13, x10
+ adcs x15, x15, xzr
+ adds x8, x8, x10
+ adcs x10, x12, x14
+ adcs x9, x13, x9
+ adcs x12, x15, xzr
+ adds x8, x12, x8
+ adcs x10, x12, x10
+ adcs x9, x9, xzr
+ adcs x12, xzr, xzr
+ adds x13, x8, #1 // =1
+ adcs x11, x10, x11
+ adcs x14, x9, xzr
+ adcs x12, x12, x17
+ tst x12, #0x1
+ csel x8, x8, x13, ne
+ csel x10, x10, x11, ne
+ csel x9, x9, x14, ne
+ stp x8, x10, [x0]
+ str x9, [x0, #16]
+ ret
+.Lfunc_end2:
+ .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L
+
+ .globl mcl_fp_mulNIST_P192L
+ .align 2
+ .type mcl_fp_mulNIST_P192L,@function
+mcl_fp_mulNIST_P192L: // @mcl_fp_mulNIST_P192L
+// BB#0:
+ stp x20, x19, [sp, #-32]!
+ stp x29, x30, [sp, #16]
+ add x29, sp, #16 // =16
+ sub sp, sp, #48 // =48
+ mov x19, x0
+ mov x0, sp
+ bl mcl_fpDbl_mulPre3L
+ ldp x9, x8, [sp, #8]
+ ldp x11, x10, [sp, #32]
+ ldr x12, [sp, #24]
+ ldr x13, [sp]
+ orr w14, wzr, #0x1
+ adds x9, x10, x9
+ adcs x8, x8, xzr
+ adcs x15, xzr, xzr
+ adds x13, x13, x12
+ adcs x9, x9, x11
+ adcs x8, x8, x10
+ adcs x15, x15, xzr
+ adds x10, x13, x10
+ movn x13, #0
+ adcs x9, x9, x12
+ adcs x8, x8, x11
+ adcs x11, x15, xzr
+ adds x10, x11, x10
+ adcs x9, x11, x9
+ adcs x8, x8, xzr
+ adcs x11, xzr, xzr
+ adds x12, x10, #1 // =1
+ adcs x14, x9, x14
+ adcs x15, x8, xzr
+ adcs x11, x11, x13
+ tst x11, #0x1
+ csel x10, x10, x12, ne
+ csel x9, x9, x14, ne
+ csel x8, x8, x15, ne
+ stp x10, x9, [x19]
+ str x8, [x19, #16]
+ sub sp, x29, #16 // =16
+ ldp x29, x30, [sp, #16]
+ ldp x20, x19, [sp], #32
+ ret
+.Lfunc_end3:
+ .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L
+
+ .globl mcl_fpDbl_mod_NIST_P521L
+ .align 2
+ .type mcl_fpDbl_mod_NIST_P521L,@function
+mcl_fpDbl_mod_NIST_P521L: // @mcl_fpDbl_mod_NIST_P521L
+// BB#0:
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+ ldp x8, x9, [x1, #112]
+ ldr x10, [x1, #128]
+ ldp x11, x12, [x1, #96]
+ ldp x13, x14, [x1, #80]
+ ldp x15, x16, [x1, #64]
+ ldp x17, x18, [x1, #48]
+ ldp x2, x3, [x1, #32]
+ ldp x4, x5, [x1, #16]
+ ldp x6, x1, [x1]
+ extr x7, x10, x9, #9
+ extr x9, x9, x8, #9
+ extr x8, x8, x12, #9
+ extr x12, x12, x11, #9
+ extr x11, x11, x14, #9
+ extr x14, x14, x13, #9
+ extr x13, x13, x16, #9
+ extr x16, x16, x15, #9
+ and x15, x15, #0x1ff
+ lsr x10, x10, #9
+ adds x16, x16, x6
+ adcs x13, x13, x1
+ adcs x14, x14, x4
+ adcs x11, x11, x5
+ adcs x12, x12, x2
+ adcs x1, x8, x3
+ adcs x17, x9, x17
+ adcs x18, x7, x18
+ adcs x2, x10, x15
+ ubfx x8, x2, #9, #1
+ adds x8, x8, x16
+ adcs x9, x13, xzr
+ and x13, x9, x8
+ adcs x10, x14, xzr
+ and x13, x13, x10
+ adcs x11, x11, xzr
+ and x13, x13, x11
+ adcs x12, x12, xzr
+ and x14, x13, x12
+ adcs x13, x1, xzr
+ and x15, x14, x13
+ adcs x14, x17, xzr
+ and x16, x15, x14
+ adcs x15, x18, xzr
+ and x17, x16, x15
+ adcs x16, x2, xzr
+ orr x18, x16, #0xfffffffffffffe00
+ and x17, x17, x18
+ cmn x17, #1 // =1
+ b.eq .LBB4_2
+// BB#1: // %nonzero
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ stp x12, x13, [x0, #32]
+ stp x14, x15, [x0, #48]
+ and x8, x16, #0x1ff
+ str x8, [x0, #64]
+ ldp x29, x30, [sp], #16
+ ret
+.LBB4_2: // %zero
+ mov w1, wzr
+ movz w2, #0x48
+ bl memset
+ ldp x29, x30, [sp], #16
+ ret
+.Lfunc_end4:
+ .size mcl_fpDbl_mod_NIST_P521L, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521L
+
+ .globl mcl_fp_mulUnitPre1L
+ .align 2
+ .type mcl_fp_mulUnitPre1L,@function
+mcl_fp_mulUnitPre1L: // @mcl_fp_mulUnitPre1L
+// BB#0:
+ ldr x8, [x1]
+ mul x9, x8, x2
+ umulh x8, x8, x2
+ stp x9, x8, [x0]
+ ret
+.Lfunc_end5:
+ .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L
+
+ .globl mcl_fpDbl_mulPre1L
+ .align 2
+ .type mcl_fpDbl_mulPre1L,@function
+mcl_fpDbl_mulPre1L: // @mcl_fpDbl_mulPre1L
+// BB#0:
+ ldr x8, [x1]
+ ldr x9, [x2]
+ mul x10, x9, x8
+ umulh x8, x9, x8
+ stp x10, x8, [x0]
+ ret
+.Lfunc_end6:
+ .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L
+
+ .globl mcl_fpDbl_sqrPre1L
+ .align 2
+ .type mcl_fpDbl_sqrPre1L,@function
+mcl_fpDbl_sqrPre1L: // @mcl_fpDbl_sqrPre1L
+// BB#0:
+ ldr x8, [x1]
+ mul x9, x8, x8
+ umulh x8, x8, x8
+ stp x9, x8, [x0]
+ ret
+.Lfunc_end7:
+ .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L
+
+ .globl mcl_fp_mont1L
+ .align 2
+ .type mcl_fp_mont1L,@function
+mcl_fp_mont1L: // @mcl_fp_mont1L
+// BB#0:
+ ldr x8, [x2]
+ ldr x9, [x1]
+ ldur x10, [x3, #-8]
+ ldr x11, [x3]
+ umulh x12, x9, x8
+ mul x8, x9, x8
+ mul x9, x8, x10
+ umulh x10, x9, x11
+ mul x9, x9, x11
+ cmn x9, x8
+ adcs x8, x10, x12
+ adcs x9, xzr, xzr
+ subs x10, x8, x11
+ sbcs x9, x9, xzr
+ tst x9, #0x1
+ csel x8, x8, x10, ne
+ str x8, [x0]
+ ret
+.Lfunc_end8:
+ .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L
+
+ .globl mcl_fp_montNF1L
+ .align 2
+ .type mcl_fp_montNF1L,@function
+mcl_fp_montNF1L: // @mcl_fp_montNF1L
+// BB#0:
+ ldr x8, [x2]
+ ldr x9, [x1]
+ ldur x10, [x3, #-8]
+ ldr x11, [x3]
+ umulh x12, x9, x8
+ mul x8, x9, x8
+ mul x9, x8, x10
+ umulh x10, x9, x11
+ mul x9, x9, x11
+ cmn x9, x8
+ adcs x8, x10, x12
+ sub x9, x8, x11
+ cmp x9, #0 // =0
+ csel x8, x8, x9, lt
+ str x8, [x0]
+ ret
+.Lfunc_end9:
+ .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L
+
+ .globl mcl_fp_montRed1L
+ .align 2
+ .type mcl_fp_montRed1L,@function
+mcl_fp_montRed1L: // @mcl_fp_montRed1L
+// BB#0:
+ ldur x8, [x2, #-8]
+ ldp x9, x11, [x1]
+ ldr x10, [x2]
+ mul x8, x9, x8
+ umulh x12, x8, x10
+ mul x8, x8, x10
+ cmn x9, x8
+ adcs x8, x11, x12
+ adcs x9, xzr, xzr
+ subs x10, x8, x10
+ sbcs x9, x9, xzr
+ tst x9, #0x1
+ csel x8, x8, x10, ne
+ str x8, [x0]
+ ret
+.Lfunc_end10:
+ .size mcl_fp_montRed1L, .Lfunc_end10-mcl_fp_montRed1L
+
+ .globl mcl_fp_addPre1L
+ .align 2
+ .type mcl_fp_addPre1L,@function
+mcl_fp_addPre1L: // @mcl_fp_addPre1L
+// BB#0:
+ ldr x8, [x1]
+ ldr x9, [x2]
+ adds x9, x9, x8
+ adcs x8, xzr, xzr
+ str x9, [x0]
+ mov x0, x8
+ ret
+.Lfunc_end11:
+ .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L
+
+ .globl mcl_fp_subPre1L
+ .align 2
+ .type mcl_fp_subPre1L,@function
+mcl_fp_subPre1L: // @mcl_fp_subPre1L
+// BB#0:
+ ldr x8, [x2]
+ ldr x9, [x1]
+ subs x9, x9, x8
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ str x9, [x0]
+ mov x0, x8
+ ret
+.Lfunc_end12:
+ .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L
+
+ .globl mcl_fp_shr1_1L
+ .align 2
+ .type mcl_fp_shr1_1L,@function
+mcl_fp_shr1_1L: // @mcl_fp_shr1_1L
+// BB#0:
+ ldr x8, [x1]
+ lsr x8, x8, #1
+ str x8, [x0]
+ ret
+.Lfunc_end13:
+ .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L
+
+ .globl mcl_fp_add1L
+ .align 2
+ .type mcl_fp_add1L,@function
+mcl_fp_add1L: // @mcl_fp_add1L
+// BB#0:
+ ldr x8, [x1]
+ ldr x9, [x2]
+ ldr x10, [x3]
+ adds x8, x9, x8
+ str x8, [x0]
+ adcs x9, xzr, xzr
+ subs x8, x8, x10
+ sbcs x9, x9, xzr
+ and w9, w9, #0x1
+ tbnz w9, #0, .LBB14_2
+// BB#1: // %nocarry
+ str x8, [x0]
+.LBB14_2: // %carry
+ ret
+.Lfunc_end14:
+ .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L
+
+ .globl mcl_fp_addNF1L
+ .align 2
+ .type mcl_fp_addNF1L,@function
+mcl_fp_addNF1L: // @mcl_fp_addNF1L
+// BB#0:
+ ldr x8, [x1]
+ ldr x9, [x2]
+ ldr x10, [x3]
+ add x8, x9, x8
+ sub x9, x8, x10
+ cmp x9, #0 // =0
+ csel x8, x8, x9, lt
+ str x8, [x0]
+ ret
+.Lfunc_end15:
+ .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L
+
+ .globl mcl_fp_sub1L
+ .align 2
+ .type mcl_fp_sub1L,@function
+mcl_fp_sub1L: // @mcl_fp_sub1L
+// BB#0:
+ ldr x8, [x2]
+ ldr x9, [x1]
+ subs x8, x9, x8
+ str x8, [x0]
+ ngcs x9, xzr
+ and w9, w9, #0x1
+ tbnz w9, #0, .LBB16_2
+// BB#1: // %nocarry
+ ret
+.LBB16_2: // %carry
+ ldr x9, [x3]
+ add x8, x9, x8
+ str x8, [x0]
+ ret
+.Lfunc_end16:
+ .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L
+
+ .globl mcl_fp_subNF1L
+ .align 2
+ .type mcl_fp_subNF1L,@function
+mcl_fp_subNF1L: // @mcl_fp_subNF1L
+// BB#0:
+ ldr x8, [x2]
+ ldr x9, [x1]
+ ldr x10, [x3]
+ sub x8, x9, x8
+ and x9, x10, x8, asr #63
+ add x8, x9, x8
+ str x8, [x0]
+ ret
+.Lfunc_end17:
+ .size mcl_fp_subNF1L, .Lfunc_end17-mcl_fp_subNF1L
+
+ .globl mcl_fpDbl_add1L
+ .align 2
+ .type mcl_fpDbl_add1L,@function
+mcl_fpDbl_add1L: // @mcl_fpDbl_add1L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ ldr x12, [x3]
+ adds x8, x9, x8
+ str x8, [x0]
+ adcs x8, x10, x11
+ adcs x9, xzr, xzr
+ subs x10, x8, x12
+ sbcs x9, x9, xzr
+ tst x9, #0x1
+ csel x8, x8, x10, ne
+ str x8, [x0, #8]
+ ret
+.Lfunc_end18:
+ .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L
+
+ .globl mcl_fpDbl_sub1L
+ .align 2
+ .type mcl_fpDbl_sub1L,@function
+mcl_fpDbl_sub1L: // @mcl_fpDbl_sub1L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ ldr x12, [x3]
+ subs x8, x8, x9
+ str x8, [x0]
+ sbcs x8, x11, x10
+ ngcs x9, xzr
+ tst x9, #0x1
+ csel x9, x12, xzr, ne
+ add x8, x9, x8
+ str x8, [x0, #8]
+ ret
+.Lfunc_end19:
+ .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L
+
+ .globl mcl_fp_mulUnitPre2L
+ .align 2
+ .type mcl_fp_mulUnitPre2L,@function
+mcl_fp_mulUnitPre2L: // @mcl_fp_mulUnitPre2L
+// BB#0:
+ ldp x8, x9, [x1]
+ mul x10, x8, x2
+ mul x11, x9, x2
+ umulh x8, x8, x2
+ umulh x9, x9, x2
+ adds x8, x8, x11
+ stp x10, x8, [x0]
+ adcs x8, x9, xzr
+ str x8, [x0, #16]
+ ret
+.Lfunc_end20:
+ .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L
+
+ .globl mcl_fpDbl_mulPre2L
+ .align 2
+ .type mcl_fpDbl_mulPre2L,@function
+mcl_fpDbl_mulPre2L: // @mcl_fpDbl_mulPre2L
+// BB#0:
+ ldp x8, x11, [x2]
+ ldp x9, x10, [x1]
+ mul x12, x9, x8
+ umulh x13, x10, x8
+ mul x14, x10, x8
+ umulh x8, x9, x8
+ mul x15, x9, x11
+ mul x16, x10, x11
+ umulh x9, x9, x11
+ umulh x10, x10, x11
+ adds x8, x8, x14
+ adcs x11, x13, xzr
+ adds x8, x8, x15
+ stp x12, x8, [x0]
+ adcs x8, x11, x16
+ adcs x11, xzr, xzr
+ adds x8, x8, x9
+ str x8, [x0, #16]
+ adcs x8, x11, x10
+ str x8, [x0, #24]
+ ret
+.Lfunc_end21:
+ .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L
+
+ .globl mcl_fpDbl_sqrPre2L
+ .align 2
+ .type mcl_fpDbl_sqrPre2L,@function
+mcl_fpDbl_sqrPre2L: // @mcl_fpDbl_sqrPre2L
+// BB#0:
+ ldp x8, x9, [x1]
+ mul x10, x8, x8
+ umulh x11, x9, x8
+ mul x12, x9, x8
+ umulh x8, x8, x8
+ umulh x13, x9, x9
+ mul x9, x9, x9
+ str x10, [x0]
+ adds x8, x8, x12
+ adcs x10, x11, xzr
+ adds x9, x11, x9
+ adcs x11, x13, xzr
+ adds x8, x12, x8
+ str x8, [x0, #8]
+ adcs x8, x9, x10
+ str x8, [x0, #16]
+ adcs x8, x11, xzr
+ str x8, [x0, #24]
+ ret
+.Lfunc_end22:
+ .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L
+
+ .globl mcl_fp_mont2L
+ .align 2
+ .type mcl_fp_mont2L,@function
+mcl_fp_mont2L: // @mcl_fp_mont2L
+// BB#0:
+ ldp x8, x14, [x2]
+ ldp x9, x10, [x1]
+ ldur x11, [x3, #-8]
+ ldp x12, x13, [x3]
+ umulh x15, x10, x8
+ mul x16, x10, x8
+ umulh x17, x9, x8
+ mul x8, x9, x8
+ umulh x18, x14, x10
+ mul x10, x14, x10
+ umulh x1, x14, x9
+ mul x9, x14, x9
+ adds x14, x17, x16
+ mul x16, x8, x11
+ adcs x15, x15, xzr
+ mul x17, x16, x13
+ umulh x2, x16, x12
+ adds x17, x2, x17
+ umulh x2, x16, x13
+ mul x16, x16, x12
+ adcs x2, x2, xzr
+ cmn x16, x8
+ adcs x8, x17, x14
+ adcs x14, x2, x15
+ adcs x15, xzr, xzr
+ adds x10, x1, x10
+ adcs x16, x18, xzr
+ adds x8, x8, x9
+ adcs x9, x14, x10
+ mul x10, x8, x11
+ adcs x11, x15, x16
+ umulh x14, x10, x13
+ mul x15, x10, x13
+ umulh x16, x10, x12
+ mul x10, x10, x12
+ adcs x17, xzr, xzr
+ adds x15, x16, x15
+ adcs x14, x14, xzr
+ cmn x10, x8
+ adcs x8, x15, x9
+ adcs x9, x14, x11
+ adcs x10, x17, xzr
+ subs x11, x8, x12
+ sbcs x12, x9, x13
+ sbcs x10, x10, xzr
+ tst x10, #0x1
+ csel x8, x8, x11, ne
+ csel x9, x9, x12, ne
+ stp x8, x9, [x0]
+ ret
+.Lfunc_end23:
+ .size mcl_fp_mont2L, .Lfunc_end23-mcl_fp_mont2L
+
+ .globl mcl_fp_montNF2L
+ .align 2
+ .type mcl_fp_montNF2L,@function
+mcl_fp_montNF2L: // @mcl_fp_montNF2L
+// BB#0:
+ ldp x8, x14, [x2]
+ ldp x9, x10, [x1]
+ ldur x11, [x3, #-8]
+ ldp x12, x13, [x3]
+ umulh x15, x10, x8
+ mul x16, x10, x8
+ umulh x17, x9, x8
+ mul x8, x9, x8
+ umulh x18, x14, x10
+ mul x10, x14, x10
+ umulh x1, x14, x9
+ mul x9, x14, x9
+ adds x14, x17, x16
+ mul x16, x8, x11
+ adcs x15, x15, xzr
+ mul x17, x16, x12
+ cmn x17, x8
+ mul x8, x16, x13
+ umulh x17, x16, x13
+ umulh x16, x16, x12
+ adcs x8, x8, x14
+ adcs x14, x15, xzr
+ adds x8, x8, x16
+ adcs x14, x14, x17
+ adds x10, x1, x10
+ adcs x15, x18, xzr
+ adds x8, x9, x8
+ adcs x9, x10, x14
+ mul x10, x8, x11
+ adcs x11, x15, xzr
+ mul x14, x10, x13
+ mul x15, x10, x12
+ umulh x16, x10, x13
+ umulh x10, x10, x12
+ cmn x15, x8
+ adcs x8, x14, x9
+ adcs x9, x11, xzr
+ adds x8, x8, x10
+ adcs x9, x9, x16
+ subs x10, x8, x12
+ sbcs x11, x9, x13
+ cmp x11, #0 // =0
+ csel x8, x8, x10, lt
+ csel x9, x9, x11, lt
+ stp x8, x9, [x0]
+ ret
+.Lfunc_end24:
+ .size mcl_fp_montNF2L, .Lfunc_end24-mcl_fp_montNF2L
+
+ .globl mcl_fp_montRed2L
+ .align 2
+ .type mcl_fp_montRed2L,@function
+mcl_fp_montRed2L: // @mcl_fp_montRed2L
+// BB#0:
+ ldur x8, [x2, #-8]
+ ldp x9, x14, [x1]
+ ldp x10, x11, [x2]
+ ldp x12, x13, [x1, #16]
+ mul x15, x9, x8
+ mul x16, x15, x11
+ umulh x17, x15, x10
+ adds x16, x17, x16
+ umulh x17, x15, x11
+ mul x15, x15, x10
+ adcs x17, x17, xzr
+ cmn x9, x15
+ adcs x9, x14, x16
+ adcs x12, x12, x17
+ mul x8, x9, x8
+ adcs x13, x13, xzr
+ umulh x14, x8, x11
+ mul x15, x8, x11
+ umulh x16, x8, x10
+ mul x8, x8, x10
+ adcs x17, xzr, xzr
+ adds x15, x16, x15
+ adcs x14, x14, xzr
+ cmn x8, x9
+ adcs x8, x15, x12
+ adcs x9, x14, x13
+ adcs x12, x17, xzr
+ subs x10, x8, x10
+ sbcs x11, x9, x11
+ sbcs x12, x12, xzr
+ tst x12, #0x1
+ csel x8, x8, x10, ne
+ csel x9, x9, x11, ne
+ stp x8, x9, [x0]
+ ret
+.Lfunc_end25:
+ .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L
+
+ .globl mcl_fp_addPre2L
+ .align 2
+ .type mcl_fp_addPre2L,@function
+mcl_fp_addPre2L: // @mcl_fp_addPre2L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ adds x8, x9, x8
+ str x8, [x0]
+ adcs x9, x10, x11
+ adcs x8, xzr, xzr
+ str x9, [x0, #8]
+ mov x0, x8
+ ret
+.Lfunc_end26:
+ .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L
+
+ .globl mcl_fp_subPre2L
+ .align 2
+ .type mcl_fp_subPre2L,@function
+mcl_fp_subPre2L: // @mcl_fp_subPre2L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ subs x8, x8, x9
+ str x8, [x0]
+ sbcs x9, x11, x10
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ str x9, [x0, #8]
+ mov x0, x8
+ ret
+.Lfunc_end27:
+ .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L
+
+ .globl mcl_fp_shr1_2L
+ .align 2
+ .type mcl_fp_shr1_2L,@function
+mcl_fp_shr1_2L: // @mcl_fp_shr1_2L
+// BB#0:
+ ldp x8, x9, [x1]
+ extr x8, x9, x8, #1
+ lsr x9, x9, #1
+ stp x8, x9, [x0]
+ ret
+.Lfunc_end28:
+ .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L
+
+ .globl mcl_fp_add2L
+ .align 2
+ .type mcl_fp_add2L,@function
+mcl_fp_add2L: // @mcl_fp_add2L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ adds x8, x9, x8
+ ldp x9, x12, [x3]
+ adcs x10, x10, x11
+ stp x8, x10, [x0]
+ adcs x11, xzr, xzr
+ subs x9, x8, x9
+ sbcs x8, x10, x12
+ sbcs x10, x11, xzr
+ and w10, w10, #0x1
+ tbnz w10, #0, .LBB29_2
+// BB#1: // %nocarry
+ stp x9, x8, [x0]
+.LBB29_2: // %carry
+ ret
+.Lfunc_end29:
+ .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L
+
+ .globl mcl_fp_addNF2L
+ .align 2
+ .type mcl_fp_addNF2L,@function
+mcl_fp_addNF2L: // @mcl_fp_addNF2L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x2]
+ ldp x12, x13, [x3]
+ adds x8, x10, x8
+ adcs x9, x11, x9
+ subs x10, x8, x12
+ sbcs x11, x9, x13
+ cmp x11, #0 // =0
+ csel x8, x8, x10, lt
+ csel x9, x9, x11, lt
+ stp x8, x9, [x0]
+ ret
+.Lfunc_end30:
+ .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L
+
+ .globl mcl_fp_sub2L
+ .align 2
+ .type mcl_fp_sub2L,@function
+mcl_fp_sub2L: // @mcl_fp_sub2L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ subs x9, x8, x9
+ sbcs x8, x11, x10
+ stp x9, x8, [x0]
+ ngcs x10, xzr
+ and w10, w10, #0x1
+ tbnz w10, #0, .LBB31_2
+// BB#1: // %nocarry
+ ret
+.LBB31_2: // %carry
+ ldp x10, x11, [x3]
+ adds x9, x10, x9
+ adcs x8, x11, x8
+ stp x9, x8, [x0]
+ ret
+.Lfunc_end31:
+ .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L
+
+ .globl mcl_fp_subNF2L
+ .align 2
+ .type mcl_fp_subNF2L,@function
+mcl_fp_subNF2L: // @mcl_fp_subNF2L
+// BB#0:
+ ldp x8, x11, [x1]
+ ldp x9, x10, [x2]
+ subs x8, x8, x9
+ ldp x9, x12, [x3]
+ sbcs x10, x11, x10
+ asr x11, x10, #63
+ and x9, x11, x9
+ and x11, x11, x12
+ adds x8, x9, x8
+ str x8, [x0]
+ adcs x8, x11, x10
+ str x8, [x0, #8]
+ ret
+.Lfunc_end32:
+ .size mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L
+
+ .globl mcl_fpDbl_add2L
+ .align 2
+ .type mcl_fpDbl_add2L,@function
+mcl_fpDbl_add2L: // @mcl_fpDbl_add2L
+// BB#0:
+ ldp x8, x9, [x2, #16]
+ ldp x10, x15, [x1]
+ ldp x11, x14, [x2]
+ ldp x12, x13, [x1, #16]
+ adds x10, x11, x10
+ ldp x11, x16, [x3]
+ str x10, [x0]
+ adcs x10, x14, x15
+ str x10, [x0, #8]
+ adcs x8, x8, x12
+ adcs x9, x9, x13
+ adcs x10, xzr, xzr
+ subs x11, x8, x11
+ sbcs x12, x9, x16
+ sbcs x10, x10, xzr
+ tst x10, #0x1
+ csel x8, x8, x11, ne
+ csel x9, x9, x12, ne
+ stp x8, x9, [x0, #16]
+ ret
+.Lfunc_end33:
+ .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L
+
+ .globl mcl_fpDbl_sub2L
+ .align 2
+ .type mcl_fpDbl_sub2L,@function
+mcl_fpDbl_sub2L: // @mcl_fpDbl_sub2L
+// BB#0:
+ ldp x8, x9, [x2, #16]
+ ldp x10, x14, [x2]
+ ldp x11, x15, [x1]
+ ldp x12, x13, [x1, #16]
+ subs x10, x11, x10
+ ldp x11, x16, [x3]
+ str x10, [x0]
+ sbcs x10, x15, x14
+ str x10, [x0, #8]
+ sbcs x8, x12, x8
+ sbcs x9, x13, x9
+ ngcs x10, xzr
+ tst x10, #0x1
+ csel x10, x16, xzr, ne
+ csel x11, x11, xzr, ne
+ adds x8, x11, x8
+ str x8, [x0, #16]
+ adcs x8, x10, x9
+ str x8, [x0, #24]
+ ret
+.Lfunc_end34:
+ .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L
+
+ .globl mcl_fp_mulUnitPre3L
+ .align 2
+ .type mcl_fp_mulUnitPre3L,@function
+mcl_fp_mulUnitPre3L: // @mcl_fp_mulUnitPre3L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldr x10, [x1, #16]
+ mul x11, x8, x2
+ mul x12, x9, x2
+ umulh x8, x8, x2
+ mul x13, x10, x2
+ umulh x9, x9, x2
+ umulh x10, x10, x2
+ adds x8, x8, x12
+ stp x11, x8, [x0]
+ adcs x8, x9, x13
+ str x8, [x0, #16]
+ adcs x8, x10, xzr
+ str x8, [x0, #24]
+ ret
+.Lfunc_end35:
+ .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L
+
+ .globl mcl_fpDbl_mulPre3L
+ .align 2
+ .type mcl_fpDbl_mulPre3L,@function
+mcl_fpDbl_mulPre3L: // @mcl_fpDbl_mulPre3L
+// BB#0:
+ stp x20, x19, [sp, #-16]!
+ ldp x8, x9, [x1]
+ ldp x10, x12, [x2]
+ ldr x11, [x1, #16]
+ ldr x13, [x2, #16]
+ mul x14, x8, x10
+ umulh x15, x11, x10
+ mul x16, x11, x10
+ umulh x17, x9, x10
+ mul x18, x9, x10
+ umulh x10, x8, x10
+ mul x1, x8, x12
+ mul x2, x11, x12
+ mul x3, x9, x12
+ umulh x4, x11, x12
+ umulh x5, x9, x12
+ umulh x12, x8, x12
+ mul x6, x8, x13
+ mul x7, x11, x13
+ mul x19, x9, x13
+ umulh x8, x8, x13
+ umulh x9, x9, x13
+ umulh x11, x11, x13
+ str x14, [x0]
+ adds x10, x10, x18
+ adcs x13, x17, x16
+ adcs x14, x15, xzr
+ adds x10, x10, x1
+ str x10, [x0, #8]
+ adcs x10, x13, x3
+ adcs x13, x14, x2
+ adcs x14, xzr, xzr
+ adds x10, x10, x12
+ adcs x12, x13, x5
+ adcs x13, x14, x4
+ adds x10, x10, x6
+ str x10, [x0, #16]
+ adcs x10, x12, x19
+ adcs x12, x13, x7
+ adcs x13, xzr, xzr
+ adds x8, x10, x8
+ str x8, [x0, #24]
+ adcs x8, x12, x9
+ str x8, [x0, #32]
+ adcs x8, x13, x11
+ str x8, [x0, #40]
+ ldp x20, x19, [sp], #16
+ ret
+.Lfunc_end36:
+ .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L
+
+ .globl mcl_fpDbl_sqrPre3L
+ .align 2
+ .type mcl_fpDbl_sqrPre3L,@function
+mcl_fpDbl_sqrPre3L: // @mcl_fpDbl_sqrPre3L
+// BB#0:
+ ldp x8, x10, [x1]
+ ldr x9, [x1, #16]
+ mul x11, x8, x8
+ umulh x12, x9, x8
+ mul x13, x9, x8
+ umulh x14, x10, x8
+ mul x15, x10, x8
+ umulh x8, x8, x8
+ mul x16, x9, x10
+ str x11, [x0]
+ adds x8, x8, x15
+ adcs x11, x14, x13
+ adcs x17, x12, xzr
+ adds x8, x8, x15
+ mul x15, x10, x10
+ str x8, [x0, #8]
+ umulh x8, x9, x10
+ umulh x10, x10, x10
+ adcs x11, x11, x15
+ adcs x15, x17, x16
+ adcs x17, xzr, xzr
+ adds x11, x11, x14
+ umulh x14, x9, x9
+ mul x9, x9, x9
+ adcs x10, x15, x10
+ adcs x15, x17, x8
+ adds x12, x12, x16
+ adcs x8, x8, x9
+ adcs x9, x14, xzr
+ adds x11, x13, x11
+ adcs x10, x12, x10
+ stp x11, x10, [x0, #16]
+ adcs x8, x8, x15
+ str x8, [x0, #32]
+ adcs x8, x9, xzr
+ str x8, [x0, #40]
+ ret
+.Lfunc_end37:
+ .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L
+
+ .globl mcl_fp_mont3L
+ .align 2
+ .type mcl_fp_mont3L,@function
+mcl_fp_mont3L: // @mcl_fp_mont3L
+// BB#0:
+ stp x24, x23, [sp, #-48]!
+ stp x22, x21, [sp, #16]
+ stp x20, x19, [sp, #32]
+ ldp x15, x16, [x2]
+ ldp x13, x14, [x1, #8]
+ ldr x12, [x1]
+ ldur x11, [x3, #-8]
+ ldp x9, x8, [x3, #8]
+ ldr x10, [x3]
+ ldr x17, [x2, #16]
+ umulh x18, x14, x15
+ mul x1, x14, x15
+ umulh x2, x13, x15
+ mul x3, x13, x15
+ umulh x4, x12, x15
+ mul x15, x12, x15
+ umulh x5, x16, x14
+ mul x6, x16, x14
+ umulh x7, x16, x13
+ mul x19, x16, x13
+ umulh x20, x16, x12
+ mul x16, x16, x12
+ umulh x21, x17, x14
+ mul x14, x17, x14
+ adds x3, x4, x3
+ mul x4, x15, x11
+ adcs x1, x2, x1
+ mul x2, x4, x8
+ mul x22, x4, x9
+ umulh x23, x4, x10
+ adcs x18, x18, xzr
+ adds x22, x23, x22
+ umulh x23, x4, x9
+ adcs x2, x23, x2
+ umulh x23, x4, x8
+ mul x4, x4, x10
+ adcs x23, x23, xzr
+ cmn x4, x15
+ umulh x15, x17, x13
+ mul x13, x17, x13
+ umulh x4, x17, x12
+ mul x12, x17, x12
+ adcs x17, x22, x3
+ adcs x1, x2, x1
+ adcs x18, x23, x18
+ adcs x2, xzr, xzr
+ adds x3, x20, x19
+ adcs x6, x7, x6
+ adcs x5, x5, xzr
+ adds x16, x17, x16
+ adcs x17, x1, x3
+ mul x1, x16, x11
+ adcs x18, x18, x6
+ mul x3, x1, x8
+ mul x6, x1, x9
+ umulh x7, x1, x10
+ adcs x2, x2, x5
+ adcs x5, xzr, xzr
+ adds x6, x7, x6
+ umulh x7, x1, x9
+ adcs x3, x7, x3
+ umulh x7, x1, x8
+ mul x1, x1, x10
+ adcs x7, x7, xzr
+ cmn x1, x16
+ adcs x16, x6, x17
+ adcs x17, x3, x18
+ adcs x18, x7, x2
+ adcs x1, x5, xzr
+ adds x13, x4, x13
+ adcs x14, x15, x14
+ adcs x15, x21, xzr
+ adds x12, x16, x12
+ adcs x13, x17, x13
+ mul x11, x12, x11
+ adcs x14, x18, x14
+ umulh x16, x11, x8
+ mul x17, x11, x8
+ umulh x18, x11, x9
+ mul x2, x11, x9
+ umulh x3, x11, x10
+ mul x11, x11, x10
+ adcs x15, x1, x15
+ adcs x1, xzr, xzr
+ adds x2, x3, x2
+ adcs x17, x18, x17
+ adcs x16, x16, xzr
+ cmn x11, x12
+ adcs x11, x2, x13
+ adcs x12, x17, x14
+ adcs x13, x16, x15
+ adcs x14, x1, xzr
+ subs x10, x11, x10
+ sbcs x9, x12, x9
+ sbcs x8, x13, x8
+ sbcs x14, x14, xzr
+ tst x14, #0x1
+ csel x10, x11, x10, ne
+ csel x9, x12, x9, ne
+ csel x8, x13, x8, ne
+ stp x10, x9, [x0]
+ str x8, [x0, #16]
+ ldp x20, x19, [sp, #32]
+ ldp x22, x21, [sp, #16]
+ ldp x24, x23, [sp], #48
+ ret
+.Lfunc_end38:
+ .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L
+
+ .globl mcl_fp_montNF3L
+ .align 2
+ .type mcl_fp_montNF3L,@function
+mcl_fp_montNF3L: // @mcl_fp_montNF3L
+// BB#0:
+ stp x22, x21, [sp, #-32]!
+ stp x20, x19, [sp, #16]
+ ldp x14, x16, [x2]
+ ldp x15, x13, [x1, #8]
+ ldr x12, [x1]
+ ldur x11, [x3, #-8]
+ ldp x9, x8, [x3, #8]
+ ldr x10, [x3]
+ ldr x17, [x2, #16]
+ umulh x18, x13, x14
+ mul x1, x13, x14
+ umulh x2, x15, x14
+ mul x3, x15, x14
+ umulh x4, x12, x14
+ mul x14, x12, x14
+ umulh x5, x16, x13
+ mul x6, x16, x13
+ umulh x7, x16, x15
+ mul x19, x16, x15
+ umulh x20, x16, x12
+ mul x16, x16, x12
+ umulh x21, x17, x13
+ mul x13, x17, x13
+ adds x3, x4, x3
+ mul x4, x14, x11
+ adcs x1, x2, x1
+ mul x2, x4, x10
+ adcs x18, x18, xzr
+ cmn x2, x14
+ umulh x14, x17, x15
+ mul x15, x17, x15
+ umulh x2, x17, x12
+ mul x12, x17, x12
+ mul x17, x4, x9
+ adcs x17, x17, x3
+ mul x3, x4, x8
+ adcs x1, x3, x1
+ umulh x3, x4, x10
+ adcs x18, x18, xzr
+ adds x17, x17, x3
+ umulh x3, x4, x9
+ adcs x1, x1, x3
+ umulh x3, x4, x8
+ adcs x18, x18, x3
+ adds x3, x20, x19
+ adcs x4, x7, x6
+ adcs x5, x5, xzr
+ adds x16, x16, x17
+ adcs x17, x3, x1
+ mul x1, x16, x11
+ adcs x18, x4, x18
+ mul x3, x1, x8
+ mul x4, x1, x10
+ adcs x5, x5, xzr
+ cmn x4, x16
+ mul x16, x1, x9
+ umulh x4, x1, x8
+ adcs x16, x16, x17
+ umulh x17, x1, x9
+ umulh x1, x1, x10
+ adcs x18, x3, x18
+ adcs x3, x5, xzr
+ adds x16, x16, x1
+ adcs x17, x18, x17
+ adcs x18, x3, x4
+ adds x15, x2, x15
+ adcs x13, x14, x13
+ adcs x14, x21, xzr
+ adds x12, x12, x16
+ adcs x15, x15, x17
+ mul x11, x12, x11
+ adcs x13, x13, x18
+ mul x16, x11, x8
+ mul x17, x11, x9
+ mul x18, x11, x10
+ umulh x1, x11, x8
+ umulh x2, x11, x9
+ umulh x11, x11, x10
+ adcs x14, x14, xzr
+ cmn x18, x12
+ adcs x12, x17, x15
+ adcs x13, x16, x13
+ adcs x14, x14, xzr
+ adds x11, x12, x11
+ adcs x12, x13, x2
+ adcs x13, x14, x1
+ subs x10, x11, x10
+ sbcs x9, x12, x9
+ sbcs x8, x13, x8
+ asr x14, x8, #63
+ cmp x14, #0 // =0
+ csel x10, x11, x10, lt
+ csel x9, x12, x9, lt
+ csel x8, x13, x8, lt
+ stp x10, x9, [x0]
+ str x8, [x0, #16]
+ ldp x20, x19, [sp, #16]
+ ldp x22, x21, [sp], #32
+ ret
+.Lfunc_end39:
+ .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L
+
+ .globl mcl_fp_montRed3L
+ .align 2
+ .type mcl_fp_montRed3L,@function
+mcl_fp_montRed3L: // @mcl_fp_montRed3L
+// BB#0:
+ ldur x8, [x2, #-8]
+ ldp x9, x17, [x1]
+ ldp x12, x10, [x2, #8]
+ ldr x11, [x2]
+ ldp x13, x14, [x1, #32]
+ ldp x15, x16, [x1, #16]
+ mul x18, x9, x8
+ umulh x1, x18, x10
+ mul x2, x18, x10
+ umulh x3, x18, x12
+ mul x4, x18, x12
+ umulh x5, x18, x11
+ mul x18, x18, x11
+ adds x4, x5, x4
+ adcs x2, x3, x2
+ adcs x1, x1, xzr
+ cmn x9, x18
+ adcs x9, x17, x4
+ adcs x15, x15, x2
+ mul x17, x9, x8
+ adcs x16, x16, x1
+ umulh x18, x17, x10
+ mul x1, x17, x10
+ umulh x2, x17, x12
+ mul x3, x17, x12
+ umulh x4, x17, x11
+ mul x17, x17, x11
+ adcs x13, x13, xzr
+ adcs x14, x14, xzr
+ adcs x5, xzr, xzr
+ adds x3, x4, x3
+ adcs x1, x2, x1
+ adcs x18, x18, xzr
+ cmn x17, x9
+ adcs x9, x3, x15
+ adcs x15, x1, x16
+ mul x8, x9, x8
+ adcs x13, x18, x13
+ umulh x16, x8, x10
+ mul x17, x8, x10
+ umulh x18, x8, x12
+ mul x1, x8, x12
+ umulh x2, x8, x11
+ mul x8, x8, x11
+ adcs x14, x14, xzr
+ adcs x3, x5, xzr
+ adds x1, x2, x1
+ adcs x17, x18, x17
+ adcs x16, x16, xzr
+ cmn x8, x9
+ adcs x8, x1, x15
+ adcs x9, x17, x13
+ adcs x13, x16, x14
+ adcs x14, x3, xzr
+ subs x11, x8, x11
+ sbcs x12, x9, x12
+ sbcs x10, x13, x10
+ sbcs x14, x14, xzr
+ tst x14, #0x1
+ csel x8, x8, x11, ne
+ csel x9, x9, x12, ne
+ csel x10, x13, x10, ne
+ stp x8, x9, [x0]
+ str x10, [x0, #16]
+ ret
+.Lfunc_end40:
+ .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L
+
+ .globl mcl_fp_addPre3L
+ .align 2
+ .type mcl_fp_addPre3L,@function
+mcl_fp_addPre3L: // @mcl_fp_addPre3L
+// BB#0:
+ ldp x11, x8, [x2, #8]
+ ldp x9, x12, [x1]
+ ldr x10, [x2]
+ ldr x13, [x1, #16]
+ adds x9, x10, x9
+ str x9, [x0]
+ adcs x9, x11, x12
+ str x9, [x0, #8]
+ adcs x9, x8, x13
+ adcs x8, xzr, xzr
+ str x9, [x0, #16]
+ mov x0, x8
+ ret
+.Lfunc_end41:
+ .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L
+
+ .globl mcl_fp_subPre3L
+ .align 2
+ .type mcl_fp_subPre3L,@function
+mcl_fp_subPre3L: // @mcl_fp_subPre3L
+// BB#0:
+ ldp x11, x8, [x2, #8]
+ ldp x9, x12, [x1]
+ ldr x10, [x2]
+ ldr x13, [x1, #16]
+ subs x9, x9, x10
+ str x9, [x0]
+ sbcs x9, x12, x11
+ str x9, [x0, #8]
+ sbcs x9, x13, x8
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ str x9, [x0, #16]
+ mov x0, x8
+ ret
+.Lfunc_end42:
+ .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L
+
+ .globl mcl_fp_shr1_3L
+ .align 2
+ .type mcl_fp_shr1_3L,@function
+mcl_fp_shr1_3L: // @mcl_fp_shr1_3L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldr x10, [x1, #16]
+ extr x8, x9, x8, #1
+ extr x9, x10, x9, #1
+ lsr x10, x10, #1
+ stp x8, x9, [x0]
+ str x10, [x0, #16]
+ ret
+.Lfunc_end43:
+ .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L
+
+ .globl mcl_fp_add3L
+ .align 2
+ .type mcl_fp_add3L,@function
+mcl_fp_add3L: // @mcl_fp_add3L
+// BB#0:
+ ldp x11, x8, [x2, #8]
+ ldp x9, x12, [x1]
+ ldr x10, [x2]
+ ldr x13, [x1, #16]
+ adds x9, x10, x9
+ adcs x11, x11, x12
+ ldr x10, [x3]
+ ldp x12, x14, [x3, #8]
+ stp x9, x11, [x0]
+ adcs x8, x8, x13
+ str x8, [x0, #16]
+ adcs x13, xzr, xzr
+ subs x10, x9, x10
+ sbcs x9, x11, x12
+ sbcs x8, x8, x14
+ sbcs x11, x13, xzr
+ and w11, w11, #0x1
+ tbnz w11, #0, .LBB44_2
+// BB#1: // %nocarry
+ stp x10, x9, [x0]
+ str x8, [x0, #16]
+.LBB44_2: // %carry
+ ret
+.Lfunc_end44:
+ .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L
+
+ .globl mcl_fp_addNF3L
+ .align 2
+ .type mcl_fp_addNF3L,@function
+mcl_fp_addNF3L: // @mcl_fp_addNF3L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x2]
+ ldr x12, [x1, #16]
+ ldr x13, [x2, #16]
+ adds x8, x10, x8
+ adcs x9, x11, x9
+ ldp x10, x11, [x3]
+ ldr x14, [x3, #16]
+ adcs x12, x13, x12
+ subs x10, x8, x10
+ sbcs x11, x9, x11
+ sbcs x13, x12, x14
+ asr x14, x13, #63
+ cmp x14, #0 // =0
+ csel x8, x8, x10, lt
+ csel x9, x9, x11, lt
+ csel x10, x12, x13, lt
+ stp x8, x9, [x0]
+ str x10, [x0, #16]
+ ret
+.Lfunc_end45:
+ .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L
+
+ .globl mcl_fp_sub3L
+ .align 2
+ .type mcl_fp_sub3L,@function
+mcl_fp_sub3L: // @mcl_fp_sub3L
+// BB#0:
+ ldp x11, x10, [x2, #8]
+ ldp x8, x12, [x1]
+ ldr x9, [x2]
+ ldr x13, [x1, #16]
+ subs x8, x8, x9
+ sbcs x9, x12, x11
+ stp x8, x9, [x0]
+ sbcs x10, x13, x10
+ str x10, [x0, #16]
+ ngcs x11, xzr
+ and w11, w11, #0x1
+ tbnz w11, #0, .LBB46_2
+// BB#1: // %nocarry
+ ret
+.LBB46_2: // %carry
+ ldp x13, x11, [x3, #8]
+ ldr x12, [x3]
+ adds x8, x12, x8
+ adcs x9, x13, x9
+ adcs x10, x11, x10
+ stp x8, x9, [x0]
+ str x10, [x0, #16]
+ ret
+.Lfunc_end46:
+ .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L
+
+ .globl mcl_fp_subNF3L
+ .align 2
+ .type mcl_fp_subNF3L,@function
+mcl_fp_subNF3L: // @mcl_fp_subNF3L
+// BB#0:
+ ldp x8, x9, [x2]
+ ldp x10, x11, [x1]
+ ldr x12, [x2, #16]
+ ldr x13, [x1, #16]
+ subs x8, x10, x8
+ sbcs x9, x11, x9
+ ldp x10, x11, [x3]
+ ldr x14, [x3, #16]
+ sbcs x12, x13, x12
+ asr x13, x12, #63
+ and x11, x13, x11
+ and x14, x13, x14
+ extr x13, x13, x12, #63
+ and x10, x13, x10
+ adds x8, x10, x8
+ str x8, [x0]
+ adcs x8, x11, x9
+ str x8, [x0, #8]
+ adcs x8, x14, x12
+ str x8, [x0, #16]
+ ret
+.Lfunc_end47:
+ .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L
+
+ .globl mcl_fpDbl_add3L
+ .align 2
+ .type mcl_fpDbl_add3L,@function
+mcl_fpDbl_add3L: // @mcl_fpDbl_add3L
+// BB#0:
+ ldp x8, x9, [x2, #32]
+ ldp x10, x11, [x1, #32]
+ ldp x12, x13, [x2, #16]
+ ldp x15, x18, [x2]
+ ldp x16, x17, [x1, #16]
+ ldp x14, x1, [x1]
+ adds x14, x15, x14
+ ldr x15, [x3, #16]
+ str x14, [x0]
+ ldp x14, x2, [x3]
+ adcs x18, x18, x1
+ adcs x12, x12, x16
+ stp x18, x12, [x0, #8]
+ adcs x12, x13, x17
+ adcs x8, x8, x10
+ adcs x9, x9, x11
+ adcs x10, xzr, xzr
+ subs x11, x12, x14
+ sbcs x13, x8, x2
+ sbcs x14, x9, x15
+ sbcs x10, x10, xzr
+ tst x10, #0x1
+ csel x10, x12, x11, ne
+ csel x8, x8, x13, ne
+ csel x9, x9, x14, ne
+ stp x10, x8, [x0, #24]
+ str x9, [x0, #40]
+ ret
+.Lfunc_end48:
+ .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L
+
+ .globl mcl_fpDbl_sub3L
+ .align 2
+ .type mcl_fpDbl_sub3L,@function
+mcl_fpDbl_sub3L: // @mcl_fpDbl_sub3L
+// BB#0:
+ ldp x8, x9, [x2, #32]
+ ldp x10, x11, [x1, #32]
+ ldp x12, x13, [x2, #16]
+ ldp x14, x18, [x2]
+ ldp x16, x17, [x1, #16]
+ ldp x15, x1, [x1]
+ subs x14, x15, x14
+ ldr x15, [x3, #16]
+ str x14, [x0]
+ ldp x14, x2, [x3]
+ sbcs x18, x1, x18
+ sbcs x12, x16, x12
+ stp x18, x12, [x0, #8]
+ sbcs x12, x17, x13
+ sbcs x8, x10, x8
+ sbcs x9, x11, x9
+ ngcs x10, xzr
+ tst x10, #0x1
+ csel x10, x15, xzr, ne
+ csel x11, x2, xzr, ne
+ csel x13, x14, xzr, ne
+ adds x12, x13, x12
+ adcs x8, x11, x8
+ stp x12, x8, [x0, #24]
+ adcs x8, x10, x9
+ str x8, [x0, #40]
+ ret
+.Lfunc_end49:
+ .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L
+
+ .globl mcl_fp_mulUnitPre4L
+ .align 2
+ .type mcl_fp_mulUnitPre4L,@function
+mcl_fp_mulUnitPre4L: // @mcl_fp_mulUnitPre4L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x1, #16]
+ mul x12, x8, x2
+ mul x13, x9, x2
+ umulh x8, x8, x2
+ mul x14, x10, x2
+ umulh x9, x9, x2
+ mul x15, x11, x2
+ umulh x10, x10, x2
+ umulh x11, x11, x2
+ adds x8, x8, x13
+ stp x12, x8, [x0]
+ adcs x8, x9, x14
+ str x8, [x0, #16]
+ adcs x8, x10, x15
+ str x8, [x0, #24]
+ adcs x8, x11, xzr
+ str x8, [x0, #32]
+ ret
+.Lfunc_end50:
+ .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L
+
+ .globl mcl_fpDbl_mulPre4L
+ .align 2
+ .type mcl_fpDbl_mulPre4L,@function
+mcl_fpDbl_mulPre4L: // @mcl_fpDbl_mulPre4L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #32 // =32
+ ldp x8, x10, [x1]
+ ldp x9, x11, [x1]
+ ldp x12, x14, [x1, #16]
+ ldp x13, x1, [x1, #16]
+ ldp x15, x16, [x2]
+ ldp x17, x18, [x2, #16]
+ mul x2, x8, x15
+ umulh x3, x14, x15
+ mul x4, x14, x15
+ umulh x5, x12, x15
+ mul x6, x12, x15
+ umulh x7, x10, x15
+ mul x19, x10, x15
+ umulh x15, x8, x15
+ mul x20, x8, x16
+ mul x21, x14, x16
+ mul x22, x12, x16
+ mul x23, x10, x16
+ umulh x24, x14, x16
+ umulh x25, x12, x16
+ umulh x26, x10, x16
+ umulh x16, x8, x16
+ mul x27, x8, x17
+ mul x28, x14, x17
+ mul x29, x12, x17
+ mul x30, x10, x17
+ umulh x14, x14, x17
+ stp x3, x14, [sp, #16]
+ umulh x12, x12, x17
+ str x12, [sp, #8] // 8-byte Folded Spill
+ umulh x3, x10, x17
+ umulh x14, x8, x17
+ mul x17, x9, x18
+ umulh x12, x9, x18
+ mul x10, x11, x18
+ umulh x11, x11, x18
+ mul x9, x13, x18
+ umulh x13, x13, x18
+ mul x8, x1, x18
+ umulh x18, x1, x18
+ str x2, [x0]
+ adds x15, x15, x19
+ adcs x1, x7, x6
+ adcs x2, x5, x4
+ ldr x4, [sp, #16] // 8-byte Folded Reload
+ adcs x4, x4, xzr
+ adds x15, x20, x15
+ str x15, [x0, #8]
+ adcs x15, x23, x1
+ adcs x1, x22, x2
+ adcs x2, x21, x4
+ adcs x4, xzr, xzr
+ adds x15, x15, x16
+ adcs x16, x1, x26
+ adcs x1, x2, x25
+ adcs x2, x4, x24
+ adds x15, x15, x27
+ str x15, [x0, #16]
+ adcs x15, x16, x30
+ adcs x16, x1, x29
+ adcs x1, x2, x28
+ adcs x2, xzr, xzr
+ adds x14, x15, x14
+ adcs x15, x16, x3
+ ldr x16, [sp, #8] // 8-byte Folded Reload
+ adcs x16, x1, x16
+ ldr x1, [sp, #24] // 8-byte Folded Reload
+ adcs x1, x2, x1
+ adds x14, x14, x17
+ str x14, [x0, #24]
+ adcs x10, x15, x10
+ adcs x9, x16, x9
+ adcs x8, x1, x8
+ adcs x14, xzr, xzr
+ adds x10, x10, x12
+ adcs x9, x9, x11
+ stp x10, x9, [x0, #32]
+ adcs x8, x8, x13
+ str x8, [x0, #48]
+ adcs x8, x14, x18
+ str x8, [x0, #56]
+ add sp, sp, #32 // =32
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end51:
+ .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L
+
+ .globl mcl_fpDbl_sqrPre4L
+ .align 2
+ .type mcl_fpDbl_sqrPre4L,@function
+mcl_fpDbl_sqrPre4L: // @mcl_fpDbl_sqrPre4L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x13, [x1]
+ ldp x11, x12, [x1, #16]
+ ldr x14, [x1, #16]
+ mul x15, x10, x10
+ umulh x16, x12, x10
+ mul x17, x12, x10
+ umulh x18, x14, x10
+ mul x2, x14, x10
+ umulh x3, x9, x10
+ mul x4, x9, x10
+ umulh x10, x10, x10
+ str x15, [x0]
+ adds x10, x10, x4
+ adcs x15, x3, x2
+ adcs x17, x18, x17
+ adcs x16, x16, xzr
+ adds x10, x10, x4
+ mul x4, x12, x9
+ str x10, [x0, #8]
+ mul x10, x9, x9
+ adcs x10, x15, x10
+ mul x15, x14, x9
+ adcs x17, x17, x15
+ adcs x16, x16, x4
+ adcs x4, xzr, xzr
+ adds x10, x10, x3
+ umulh x3, x9, x9
+ adcs x17, x17, x3
+ umulh x3, x12, x9
+ umulh x9, x14, x9
+ adcs x16, x16, x9
+ adcs x3, x4, x3
+ ldr x1, [x1, #24]
+ adds x10, x10, x2
+ mul x2, x12, x14
+ str x10, [x0, #16]
+ mul x10, x14, x14
+ umulh x12, x12, x14
+ umulh x14, x14, x14
+ adcs x15, x17, x15
+ mul x17, x8, x1
+ adcs x10, x16, x10
+ mul x16, x11, x1
+ adcs x2, x3, x2
+ adcs x3, xzr, xzr
+ adds x15, x15, x18
+ mul x18, x13, x1
+ adcs x9, x10, x9
+ mul x10, x1, x1
+ umulh x8, x8, x1
+ umulh x13, x13, x1
+ umulh x11, x11, x1
+ umulh x1, x1, x1
+ adcs x14, x2, x14
+ adcs x12, x3, x12
+ adds x15, x15, x17
+ adcs x9, x9, x18
+ adcs x14, x14, x16
+ adcs x10, x12, x10
+ adcs x12, xzr, xzr
+ adds x8, x9, x8
+ stp x15, x8, [x0, #24]
+ adcs x8, x14, x13
+ str x8, [x0, #40]
+ adcs x8, x10, x11
+ str x8, [x0, #48]
+ adcs x8, x12, x1
+ str x8, [x0, #56]
+ ret
+.Lfunc_end52:
+ .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L
+
+ .globl mcl_fp_mont4L
+ .align 2
+ .type mcl_fp_mont4L,@function
+mcl_fp_mont4L: // @mcl_fp_mont4L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #16 // =16
+ str x0, [sp, #8] // 8-byte Folded Spill
+ ldp x13, x16, [x1, #16]
+ ldp x14, x15, [x1]
+ ldur x0, [x3, #-8]
+ ldp x9, x8, [x3, #16]
+ ldp x11, x10, [x3]
+ ldp x17, x18, [x2]
+ ldp x1, x2, [x2, #16]
+ umulh x3, x16, x17
+ mul x4, x16, x17
+ umulh x5, x13, x17
+ mul x6, x13, x17
+ umulh x7, x15, x17
+ mul x19, x15, x17
+ umulh x20, x14, x17
+ mul x17, x14, x17
+ umulh x21, x18, x16
+ mul x22, x18, x16
+ umulh x23, x18, x13
+ mul x24, x18, x13
+ umulh x25, x18, x15
+ mul x26, x18, x15
+ umulh x27, x18, x14
+ mul x18, x18, x14
+ umulh x28, x1, x16
+ adds x19, x20, x19
+ mul x20, x17, x0
+ adcs x6, x7, x6
+ mul x7, x20, x8
+ mul x29, x20, x9
+ mul x30, x20, x10
+ adcs x4, x5, x4
+ umulh x5, x20, x11
+ adcs x3, x3, xzr
+ adds x5, x5, x30
+ umulh x30, x20, x10
+ adcs x29, x30, x29
+ umulh x30, x20, x9
+ adcs x7, x30, x7
+ umulh x30, x20, x8
+ mul x20, x20, x11
+ adcs x30, x30, xzr
+ cmn x20, x17
+ mul x17, x1, x16
+ umulh x20, x1, x13
+ adcs x5, x5, x19
+ mul x19, x1, x13
+ adcs x6, x29, x6
+ umulh x29, x1, x15
+ adcs x4, x7, x4
+ mul x7, x1, x15
+ adcs x3, x30, x3
+ adcs x30, xzr, xzr
+ adds x26, x27, x26
+ umulh x27, x1, x14
+ mul x1, x1, x14
+ adcs x24, x25, x24
+ umulh x25, x2, x16
+ mul x16, x2, x16
+ adcs x22, x23, x22
+ adcs x21, x21, xzr
+ adds x18, x5, x18
+ adcs x5, x6, x26
+ mul x6, x18, x0
+ adcs x4, x4, x24
+ mul x23, x6, x8
+ mul x24, x6, x9
+ mul x26, x6, x10
+ adcs x3, x3, x22
+ umulh x22, x6, x11
+ adcs x21, x30, x21
+ adcs x30, xzr, xzr
+ adds x22, x22, x26
+ umulh x26, x6, x10
+ adcs x24, x26, x24
+ umulh x26, x6, x9
+ adcs x23, x26, x23
+ umulh x26, x6, x8
+ mul x6, x6, x11
+ adcs x26, x26, xzr
+ cmn x6, x18
+ umulh x18, x2, x13
+ mul x13, x2, x13
+ umulh x6, x2, x15
+ mul x15, x2, x15
+ umulh x12, x2, x14
+ mul x14, x2, x14
+ adcs x2, x22, x5
+ adcs x4, x24, x4
+ adcs x3, x23, x3
+ adcs x5, x26, x21
+ adcs x21, x30, xzr
+ adds x7, x27, x7
+ adcs x19, x29, x19
+ adcs x17, x20, x17
+ adcs x20, x28, xzr
+ adds x1, x2, x1
+ adcs x2, x4, x7
+ mul x4, x1, x0
+ adcs x3, x3, x19
+ mul x7, x4, x8
+ mul x19, x4, x9
+ mul x22, x4, x10
+ adcs x17, x5, x17
+ umulh x5, x4, x11
+ adcs x20, x21, x20
+ adcs x21, xzr, xzr
+ adds x5, x5, x22
+ umulh x22, x4, x10
+ adcs x19, x22, x19
+ umulh x22, x4, x9
+ adcs x7, x22, x7
+ umulh x22, x4, x8
+ mul x4, x4, x11
+ adcs x22, x22, xzr
+ cmn x4, x1
+ adcs x1, x5, x2
+ adcs x2, x19, x3
+ adcs x17, x7, x17
+ adcs x3, x22, x20
+ adcs x4, x21, xzr
+ adds x12, x12, x15
+ adcs x13, x6, x13
+ adcs x15, x18, x16
+ adcs x16, x25, xzr
+ adds x14, x1, x14
+ adcs x12, x2, x12
+ mul x18, x14, x0
+ adcs x13, x17, x13
+ umulh x17, x18, x8
+ mul x0, x18, x8
+ umulh x1, x18, x9
+ mul x2, x18, x9
+ umulh x5, x18, x10
+ mul x6, x18, x10
+ umulh x7, x18, x11
+ mul x18, x18, x11
+ adcs x15, x3, x15
+ adcs x16, x4, x16
+ adcs x3, xzr, xzr
+ adds x4, x7, x6
+ adcs x2, x5, x2
+ adcs x0, x1, x0
+ adcs x17, x17, xzr
+ cmn x18, x14
+ adcs x12, x4, x12
+ adcs x13, x2, x13
+ adcs x14, x0, x15
+ adcs x15, x17, x16
+ adcs x16, x3, xzr
+ subs x11, x12, x11
+ sbcs x10, x13, x10
+ sbcs x9, x14, x9
+ sbcs x8, x15, x8
+ sbcs x16, x16, xzr
+ tst x16, #0x1
+ csel x11, x12, x11, ne
+ csel x10, x13, x10, ne
+ csel x9, x14, x9, ne
+ csel x8, x15, x8, ne
+ ldr x12, [sp, #8] // 8-byte Folded Reload
+ stp x11, x10, [x12]
+ stp x9, x8, [x12, #16]
+ add sp, sp, #16 // =16
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end53:
+ .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L
+
+ .globl mcl_fp_montNF4L
+ .align 2
+ .type mcl_fp_montNF4L,@function
+mcl_fp_montNF4L: // @mcl_fp_montNF4L
+// BB#0:
+ stp x28, x27, [sp, #-80]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ ldp x14, x15, [x1, #16]
+ ldp x13, x16, [x1]
+ ldur x12, [x3, #-8]
+ ldp x9, x8, [x3, #16]
+ ldp x11, x10, [x3]
+ ldp x17, x18, [x2]
+ ldp x1, x2, [x2, #16]
+ umulh x3, x15, x17
+ mul x4, x15, x17
+ umulh x5, x14, x17
+ mul x6, x14, x17
+ umulh x7, x16, x17
+ mul x19, x16, x17
+ umulh x20, x13, x17
+ mul x17, x13, x17
+ umulh x21, x18, x15
+ mul x22, x18, x15
+ umulh x23, x18, x14
+ mul x24, x18, x14
+ umulh x25, x18, x16
+ mul x26, x18, x16
+ umulh x27, x18, x13
+ mul x18, x18, x13
+ adds x19, x20, x19
+ umulh x20, x1, x15
+ adcs x6, x7, x6
+ mul x7, x17, x12
+ adcs x4, x5, x4
+ mul x5, x7, x11
+ adcs x3, x3, xzr
+ cmn x5, x17
+ mul x17, x1, x15
+ mul x5, x7, x10
+ adcs x5, x5, x19
+ mul x19, x7, x9
+ adcs x6, x19, x6
+ mul x19, x7, x8
+ adcs x4, x19, x4
+ umulh x19, x7, x11
+ adcs x3, x3, xzr
+ adds x5, x5, x19
+ umulh x19, x7, x10
+ adcs x6, x6, x19
+ umulh x19, x7, x9
+ adcs x4, x4, x19
+ umulh x19, x1, x14
+ umulh x7, x7, x8
+ adcs x3, x3, x7
+ mul x7, x1, x14
+ adds x26, x27, x26
+ umulh x27, x1, x16
+ adcs x24, x25, x24
+ mul x25, x1, x16
+ adcs x22, x23, x22
+ umulh x23, x1, x13
+ mul x1, x1, x13
+ adcs x21, x21, xzr
+ adds x18, x18, x5
+ umulh x5, x2, x15
+ mul x15, x2, x15
+ adcs x6, x26, x6
+ umulh x26, x2, x14
+ mul x14, x2, x14
+ adcs x4, x24, x4
+ mul x24, x18, x12
+ adcs x3, x22, x3
+ mul x22, x24, x11
+ adcs x21, x21, xzr
+ cmn x22, x18
+ umulh x18, x2, x16
+ mul x16, x2, x16
+ umulh x22, x2, x13
+ mul x13, x2, x13
+ mul x2, x24, x10
+ adcs x2, x2, x6
+ mul x6, x24, x9
+ adcs x4, x6, x4
+ mul x6, x24, x8
+ adcs x3, x6, x3
+ umulh x6, x24, x11
+ adcs x21, x21, xzr
+ adds x2, x2, x6
+ umulh x6, x24, x10
+ adcs x4, x4, x6
+ umulh x6, x24, x9
+ adcs x3, x3, x6
+ umulh x6, x24, x8
+ adcs x6, x21, x6
+ adds x21, x23, x25
+ adcs x7, x27, x7
+ adcs x17, x19, x17
+ adcs x19, x20, xzr
+ adds x1, x1, x2
+ adcs x2, x21, x4
+ mul x4, x1, x12
+ adcs x3, x7, x3
+ mul x7, x4, x8
+ mul x20, x4, x9
+ adcs x17, x17, x6
+ mul x6, x4, x11
+ adcs x19, x19, xzr
+ cmn x6, x1
+ mul x1, x4, x10
+ umulh x6, x4, x8
+ adcs x1, x1, x2
+ umulh x2, x4, x9
+ adcs x3, x20, x3
+ umulh x20, x4, x10
+ umulh x4, x4, x11
+ adcs x17, x7, x17
+ adcs x7, x19, xzr
+ adds x1, x1, x4
+ adcs x3, x3, x20
+ adcs x17, x17, x2
+ adcs x2, x7, x6
+ adds x16, x22, x16
+ adcs x14, x18, x14
+ adcs x15, x26, x15
+ adcs x18, x5, xzr
+ adds x13, x13, x1
+ adcs x16, x16, x3
+ mul x12, x13, x12
+ adcs x14, x14, x17
+ mul x17, x12, x8
+ mul x1, x12, x9
+ mul x3, x12, x10
+ mul x4, x12, x11
+ umulh x5, x12, x8
+ umulh x6, x12, x9
+ umulh x7, x12, x10
+ umulh x12, x12, x11
+ adcs x15, x15, x2
+ adcs x18, x18, xzr
+ cmn x4, x13
+ adcs x13, x3, x16
+ adcs x14, x1, x14
+ adcs x15, x17, x15
+ adcs x16, x18, xzr
+ adds x12, x13, x12
+ adcs x13, x14, x7
+ adcs x14, x15, x6
+ adcs x15, x16, x5
+ subs x11, x12, x11
+ sbcs x10, x13, x10
+ sbcs x9, x14, x9
+ sbcs x8, x15, x8
+ cmp x8, #0 // =0
+ csel x11, x12, x11, lt
+ csel x10, x13, x10, lt
+ csel x9, x14, x9, lt
+ csel x8, x15, x8, lt
+ stp x11, x10, [x0]
+ stp x9, x8, [x0, #16]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #80
+ ret
+.Lfunc_end54:
+ .size mcl_fp_montNF4L, .Lfunc_end54-mcl_fp_montNF4L
+
+ .globl mcl_fp_montRed4L
+ .align 2
+ .type mcl_fp_montRed4L,@function
+mcl_fp_montRed4L: // @mcl_fp_montRed4L
+// BB#0:
+ stp x22, x21, [sp, #-32]!
+ stp x20, x19, [sp, #16]
+ ldur x12, [x2, #-8]
+ ldp x9, x8, [x2, #16]
+ ldp x11, x10, [x2]
+ ldp x14, x15, [x1, #48]
+ ldp x16, x17, [x1, #32]
+ ldp x18, x2, [x1, #16]
+ ldp x13, x1, [x1]
+ mul x3, x13, x12
+ umulh x4, x3, x8
+ mul x5, x3, x8
+ umulh x6, x3, x9
+ mul x7, x3, x9
+ umulh x19, x3, x10
+ mul x20, x3, x10
+ umulh x21, x3, x11
+ mul x3, x3, x11
+ adds x20, x21, x20
+ adcs x7, x19, x7
+ adcs x5, x6, x5
+ adcs x4, x4, xzr
+ cmn x13, x3
+ adcs x13, x1, x20
+ adcs x18, x18, x7
+ mul x1, x13, x12
+ adcs x2, x2, x5
+ umulh x3, x1, x8
+ mul x5, x1, x8
+ umulh x6, x1, x9
+ mul x7, x1, x9
+ umulh x19, x1, x10
+ mul x20, x1, x10
+ umulh x21, x1, x11
+ mul x1, x1, x11
+ adcs x16, x16, x4
+ adcs x17, x17, xzr
+ adcs x14, x14, xzr
+ adcs x15, x15, xzr
+ adcs x4, xzr, xzr
+ adds x20, x21, x20
+ adcs x7, x19, x7
+ adcs x5, x6, x5
+ adcs x3, x3, xzr
+ cmn x1, x13
+ adcs x13, x20, x18
+ adcs x18, x7, x2
+ mul x1, x13, x12
+ adcs x16, x5, x16
+ umulh x2, x1, x8
+ mul x5, x1, x8
+ umulh x6, x1, x9
+ mul x7, x1, x9
+ umulh x19, x1, x10
+ mul x20, x1, x10
+ umulh x21, x1, x11
+ mul x1, x1, x11
+ adcs x17, x3, x17
+ adcs x14, x14, xzr
+ adcs x15, x15, xzr
+ adcs x3, x4, xzr
+ adds x4, x21, x20
+ adcs x7, x19, x7
+ adcs x5, x6, x5
+ adcs x2, x2, xzr
+ cmn x1, x13
+ adcs x13, x4, x18
+ adcs x16, x7, x16
+ mul x12, x13, x12
+ adcs x17, x5, x17
+ umulh x18, x12, x8
+ mul x1, x12, x8
+ umulh x4, x12, x9
+ mul x5, x12, x9
+ umulh x6, x12, x10
+ mul x7, x12, x10
+ umulh x19, x12, x11
+ mul x12, x12, x11
+ adcs x14, x2, x14
+ adcs x15, x15, xzr
+ adcs x2, x3, xzr
+ adds x3, x19, x7
+ adcs x5, x6, x5
+ adcs x1, x4, x1
+ adcs x18, x18, xzr
+ cmn x12, x13
+ adcs x12, x3, x16
+ adcs x13, x5, x17
+ adcs x14, x1, x14
+ adcs x15, x18, x15
+ adcs x16, x2, xzr
+ subs x11, x12, x11
+ sbcs x10, x13, x10
+ sbcs x9, x14, x9
+ sbcs x8, x15, x8
+ sbcs x16, x16, xzr
+ tst x16, #0x1
+ csel x11, x12, x11, ne
+ csel x10, x13, x10, ne
+ csel x9, x14, x9, ne
+ csel x8, x15, x8, ne
+ stp x11, x10, [x0]
+ stp x9, x8, [x0, #16]
+ ldp x20, x19, [sp, #16]
+ ldp x22, x21, [sp], #32
+ ret
+.Lfunc_end55:
+ .size mcl_fp_montRed4L, .Lfunc_end55-mcl_fp_montRed4L
+
+ .globl mcl_fp_addPre4L
+ .align 2
+ .type mcl_fp_addPre4L,@function
+mcl_fp_addPre4L: // @mcl_fp_addPre4L
+// BB#0:
+ ldp x8, x9, [x2, #16]
+ ldp x10, x11, [x2]
+ ldp x12, x13, [x1]
+ ldp x14, x15, [x1, #16]
+ adds x10, x10, x12
+ str x10, [x0]
+ adcs x10, x11, x13
+ adcs x8, x8, x14
+ stp x10, x8, [x0, #8]
+ adcs x9, x9, x15
+ adcs x8, xzr, xzr
+ str x9, [x0, #24]
+ mov x0, x8
+ ret
+.Lfunc_end56:
+ .size mcl_fp_addPre4L, .Lfunc_end56-mcl_fp_addPre4L
+
+ .globl mcl_fp_subPre4L
+ .align 2
+ .type mcl_fp_subPre4L,@function
+mcl_fp_subPre4L: // @mcl_fp_subPre4L
+// BB#0:
+ ldp x8, x9, [x2, #16]
+ ldp x10, x11, [x2]
+ ldp x12, x13, [x1]
+ ldp x14, x15, [x1, #16]
+ subs x10, x12, x10
+ str x10, [x0]
+ sbcs x10, x13, x11
+ sbcs x8, x14, x8
+ stp x10, x8, [x0, #8]
+ sbcs x9, x15, x9
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ str x9, [x0, #24]
+ mov x0, x8
+ ret
+.Lfunc_end57:
+ .size mcl_fp_subPre4L, .Lfunc_end57-mcl_fp_subPre4L
+
+ .globl mcl_fp_shr1_4L
+ .align 2
+ .type mcl_fp_shr1_4L,@function
+mcl_fp_shr1_4L: // @mcl_fp_shr1_4L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x1, #16]
+ extr x8, x9, x8, #1
+ extr x9, x10, x9, #1
+ extr x10, x11, x10, #1
+ lsr x11, x11, #1
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ ret
+.Lfunc_end58:
+ .size mcl_fp_shr1_4L, .Lfunc_end58-mcl_fp_shr1_4L
+
+ .globl mcl_fp_add4L
+ .align 2
+ .type mcl_fp_add4L,@function
+mcl_fp_add4L: // @mcl_fp_add4L
+// BB#0:
+ ldp x8, x9, [x2, #16]
+ ldp x10, x11, [x2]
+ ldp x12, x13, [x1]
+ ldp x14, x15, [x1, #16]
+ adds x10, x10, x12
+ adcs x12, x11, x13
+ ldp x11, x13, [x3]
+ stp x10, x12, [x0]
+ adcs x8, x8, x14
+ adcs x14, x9, x15
+ stp x8, x14, [x0, #16]
+ adcs x15, xzr, xzr
+ ldp x9, x16, [x3, #16]
+ subs x11, x10, x11
+ sbcs x10, x12, x13
+ sbcs x9, x8, x9
+ sbcs x8, x14, x16
+ sbcs x12, x15, xzr
+ and w12, w12, #0x1
+ tbnz w12, #0, .LBB59_2
+// BB#1: // %nocarry
+ stp x11, x10, [x0]
+ stp x9, x8, [x0, #16]
+.LBB59_2: // %carry
+ ret
+.Lfunc_end59:
+ .size mcl_fp_add4L, .Lfunc_end59-mcl_fp_add4L
+
+ .globl mcl_fp_addNF4L
+ .align 2
+ .type mcl_fp_addNF4L,@function
+mcl_fp_addNF4L: // @mcl_fp_addNF4L
+// BB#0:
+ ldp x8, x9, [x1, #16]
+ ldp x10, x11, [x1]
+ ldp x12, x13, [x2]
+ ldp x14, x15, [x2, #16]
+ adds x10, x12, x10
+ adcs x11, x13, x11
+ ldp x12, x13, [x3]
+ adcs x8, x14, x8
+ ldp x14, x16, [x3, #16]
+ adcs x9, x15, x9
+ subs x12, x10, x12
+ sbcs x13, x11, x13
+ sbcs x14, x8, x14
+ sbcs x15, x9, x16
+ cmp x15, #0 // =0
+ csel x10, x10, x12, lt
+ csel x11, x11, x13, lt
+ csel x8, x8, x14, lt
+ csel x9, x9, x15, lt
+ stp x10, x11, [x0]
+ stp x8, x9, [x0, #16]
+ ret
+.Lfunc_end60:
+ .size mcl_fp_addNF4L, .Lfunc_end60-mcl_fp_addNF4L
+
+ .globl mcl_fp_sub4L
+ .align 2
+ .type mcl_fp_sub4L,@function
+mcl_fp_sub4L: // @mcl_fp_sub4L
+// BB#0:
+ ldp x10, x11, [x2, #16]
+ ldp x8, x9, [x2]
+ ldp x12, x13, [x1]
+ ldp x14, x15, [x1, #16]
+ subs x8, x12, x8
+ sbcs x9, x13, x9
+ stp x8, x9, [x0]
+ sbcs x10, x14, x10
+ sbcs x11, x15, x11
+ stp x10, x11, [x0, #16]
+ ngcs x12, xzr
+ and w12, w12, #0x1
+ tbnz w12, #0, .LBB61_2
+// BB#1: // %nocarry
+ ret
+.LBB61_2: // %carry
+ ldp x12, x13, [x3, #16]
+ ldp x14, x15, [x3]
+ adds x8, x14, x8
+ adcs x9, x15, x9
+ adcs x10, x12, x10
+ adcs x11, x13, x11
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ ret
+.Lfunc_end61:
+ .size mcl_fp_sub4L, .Lfunc_end61-mcl_fp_sub4L
+
+ .globl mcl_fp_subNF4L
+ .align 2
+ .type mcl_fp_subNF4L,@function
+mcl_fp_subNF4L: // @mcl_fp_subNF4L
+// BB#0:
+ ldp x8, x9, [x2, #16]
+ ldp x10, x11, [x2]
+ ldp x12, x13, [x1]
+ ldp x14, x15, [x1, #16]
+ subs x10, x12, x10
+ sbcs x11, x13, x11
+ ldp x12, x13, [x3, #16]
+ sbcs x8, x14, x8
+ ldp x14, x16, [x3]
+ sbcs x9, x15, x9
+ asr x15, x9, #63
+ and x14, x15, x14
+ and x16, x15, x16
+ and x12, x15, x12
+ and x13, x15, x13
+ adds x10, x14, x10
+ str x10, [x0]
+ adcs x10, x16, x11
+ adcs x8, x12, x8
+ stp x10, x8, [x0, #8]
+ adcs x8, x13, x9
+ str x8, [x0, #24]
+ ret
+.Lfunc_end62:
+ .size mcl_fp_subNF4L, .Lfunc_end62-mcl_fp_subNF4L
+
+ .globl mcl_fpDbl_add4L
+ .align 2
+ .type mcl_fpDbl_add4L,@function
+mcl_fpDbl_add4L: // @mcl_fpDbl_add4L
+// BB#0:
+ ldp x8, x9, [x2, #48]
+ ldp x10, x11, [x1, #48]
+ ldp x12, x13, [x2, #32]
+ ldp x14, x15, [x1, #32]
+ ldp x16, x17, [x2, #16]
+ ldp x4, x2, [x2]
+ ldp x5, x6, [x1, #16]
+ ldp x18, x1, [x1]
+ adds x18, x4, x18
+ str x18, [x0]
+ ldp x18, x4, [x3, #16]
+ adcs x1, x2, x1
+ ldp x2, x3, [x3]
+ adcs x16, x16, x5
+ stp x1, x16, [x0, #8]
+ adcs x16, x17, x6
+ str x16, [x0, #24]
+ adcs x12, x12, x14
+ adcs x13, x13, x15
+ adcs x8, x8, x10
+ adcs x9, x9, x11
+ adcs x10, xzr, xzr
+ subs x11, x12, x2
+ sbcs x14, x13, x3
+ sbcs x15, x8, x18
+ sbcs x16, x9, x4
+ sbcs x10, x10, xzr
+ tst x10, #0x1
+ csel x10, x12, x11, ne
+ csel x11, x13, x14, ne
+ csel x8, x8, x15, ne
+ csel x9, x9, x16, ne
+ stp x10, x11, [x0, #32]
+ stp x8, x9, [x0, #48]
+ ret
+.Lfunc_end63:
+ .size mcl_fpDbl_add4L, .Lfunc_end63-mcl_fpDbl_add4L
+
+ .globl mcl_fpDbl_sub4L
+ .align 2
+ .type mcl_fpDbl_sub4L,@function
+mcl_fpDbl_sub4L: // @mcl_fpDbl_sub4L
+// BB#0:
+ ldp x8, x9, [x2, #48]
+ ldp x10, x11, [x1, #48]
+ ldp x12, x13, [x2, #32]
+ ldp x14, x15, [x1, #32]
+ ldp x16, x17, [x2, #16]
+ ldp x18, x2, [x2]
+ ldp x5, x6, [x1, #16]
+ ldp x4, x1, [x1]
+ subs x18, x4, x18
+ str x18, [x0]
+ ldp x18, x4, [x3, #16]
+ sbcs x1, x1, x2
+ ldp x2, x3, [x3]
+ sbcs x16, x5, x16
+ stp x1, x16, [x0, #8]
+ sbcs x16, x6, x17
+ sbcs x12, x14, x12
+ sbcs x13, x15, x13
+ sbcs x8, x10, x8
+ sbcs x9, x11, x9
+ ngcs x10, xzr
+ tst x10, #0x1
+ csel x10, x4, xzr, ne
+ csel x11, x18, xzr, ne
+ csel x14, x3, xzr, ne
+ csel x15, x2, xzr, ne
+ adds x12, x15, x12
+ stp x16, x12, [x0, #24]
+ adcs x12, x14, x13
+ adcs x8, x11, x8
+ stp x12, x8, [x0, #40]
+ adcs x8, x10, x9
+ str x8, [x0, #56]
+ ret
+.Lfunc_end64:
+ .size mcl_fpDbl_sub4L, .Lfunc_end64-mcl_fpDbl_sub4L
+
+ .globl mcl_fp_mulUnitPre5L
+ .align 2
+ .type mcl_fp_mulUnitPre5L,@function
+mcl_fp_mulUnitPre5L: // @mcl_fp_mulUnitPre5L
+// BB#0:
+ ldp x12, x8, [x1, #24]
+ ldp x9, x10, [x1]
+ ldr x11, [x1, #16]
+ mul x13, x9, x2
+ mul x14, x10, x2
+ umulh x9, x9, x2
+ mul x15, x11, x2
+ umulh x10, x10, x2
+ mul x16, x12, x2
+ umulh x11, x11, x2
+ mul x17, x8, x2
+ umulh x12, x12, x2
+ umulh x8, x8, x2
+ adds x9, x9, x14
+ stp x13, x9, [x0]
+ adcs x9, x10, x15
+ str x9, [x0, #16]
+ adcs x9, x11, x16
+ str x9, [x0, #24]
+ adcs x9, x12, x17
+ adcs x8, x8, xzr
+ stp x9, x8, [x0, #32]
+ ret
+.Lfunc_end65:
+ .size mcl_fp_mulUnitPre5L, .Lfunc_end65-mcl_fp_mulUnitPre5L
+
+ .globl mcl_fpDbl_mulPre5L
+ .align 2
+ .type mcl_fpDbl_mulPre5L,@function
+mcl_fpDbl_mulPre5L: // @mcl_fpDbl_mulPre5L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #176 // =176
+ ldp x8, x10, [x1]
+ ldp x9, x15, [x1]
+ ldp x11, x12, [x1, #24]
+ ldp x13, x14, [x2]
+ ldp x16, x18, [x1, #16]
+ ldr x17, [x1, #16]
+ ldr x3, [x1, #32]
+ ldp x4, x5, [x2, #16]
+ mul x6, x8, x13
+ str x6, [sp, #72] // 8-byte Folded Spill
+ umulh x6, x12, x13
+ str x6, [sp, #168] // 8-byte Folded Spill
+ mul x6, x12, x13
+ str x6, [sp, #152] // 8-byte Folded Spill
+ umulh x6, x11, x13
+ str x6, [sp, #112] // 8-byte Folded Spill
+ mul x6, x11, x13
+ str x6, [sp, #64] // 8-byte Folded Spill
+ umulh x6, x17, x13
+ mul x23, x17, x13
+ umulh x24, x10, x13
+ mul x25, x10, x13
+ umulh x7, x8, x13
+ mul x26, x8, x14
+ mul x13, x12, x14
+ str x13, [sp, #104] // 8-byte Folded Spill
+ mul x13, x11, x14
+ stp x13, x6, [sp, #40]
+ mul x29, x17, x14
+ mul x30, x10, x14
+ umulh x12, x12, x14
+ umulh x11, x11, x14
+ str x11, [sp, #96] // 8-byte Folded Spill
+ umulh x11, x17, x14
+ umulh x27, x10, x14
+ umulh x20, x8, x14
+ mul x8, x9, x4
+ stp x8, x11, [sp, #24]
+ mul x8, x3, x4
+ stp x8, x12, [sp, #136]
+ mul x8, x18, x4
+ str x8, [sp, #88] // 8-byte Folded Spill
+ mul x8, x16, x4
+ str x8, [sp, #16] // 8-byte Folded Spill
+ mul x28, x15, x4
+ umulh x8, x3, x4
+ str x8, [sp, #160] // 8-byte Folded Spill
+ umulh x8, x18, x4
+ str x8, [sp, #128] // 8-byte Folded Spill
+ umulh x8, x16, x4
+ str x8, [sp, #80] // 8-byte Folded Spill
+ umulh x8, x15, x4
+ str x8, [sp, #8] // 8-byte Folded Spill
+ umulh x22, x9, x4
+ mul x8, x3, x5
+ str x8, [sp, #120] // 8-byte Folded Spill
+ umulh x8, x3, x5
+ str x8, [sp, #56] // 8-byte Folded Spill
+ mul x6, x18, x5
+ umulh x21, x18, x5
+ mul x3, x16, x5
+ umulh x19, x16, x5
+ mul x17, x15, x5
+ umulh x4, x15, x5
+ mul x16, x9, x5
+ umulh x18, x9, x5
+ ldr x2, [x2, #32]
+ ldp x10, x5, [x1, #16]
+ ldp x8, x9, [x1]
+ ldr x1, [x1, #32]
+ mul x15, x8, x2
+ umulh x14, x8, x2
+ mul x12, x9, x2
+ umulh x13, x9, x2
+ mul x11, x10, x2
+ umulh x10, x10, x2
+ mul x9, x5, x2
+ umulh x5, x5, x2
+ mul x8, x1, x2
+ umulh x1, x1, x2
+ ldr x2, [sp, #72] // 8-byte Folded Reload
+ str x2, [x0]
+ adds x2, x7, x25
+ adcs x7, x24, x23
+ ldr x23, [sp, #64] // 8-byte Folded Reload
+ ldr x24, [sp, #48] // 8-byte Folded Reload
+ adcs x23, x24, x23
+ ldr x24, [sp, #152] // 8-byte Folded Reload
+ ldr x25, [sp, #112] // 8-byte Folded Reload
+ adcs x24, x25, x24
+ ldr x25, [sp, #168] // 8-byte Folded Reload
+ adcs x25, x25, xzr
+ adds x2, x26, x2
+ str x2, [x0, #8]
+ adcs x2, x30, x7
+ adcs x7, x29, x23
+ ldr x23, [sp, #40] // 8-byte Folded Reload
+ adcs x23, x23, x24
+ ldr x24, [sp, #104] // 8-byte Folded Reload
+ adcs x24, x24, x25
+ adcs x25, xzr, xzr
+ adds x2, x2, x20
+ adcs x7, x7, x27
+ ldr x20, [sp, #32] // 8-byte Folded Reload
+ adcs x20, x23, x20
+ ldr x23, [sp, #96] // 8-byte Folded Reload
+ adcs x23, x24, x23
+ ldr x24, [sp, #144] // 8-byte Folded Reload
+ adcs x24, x25, x24
+ ldr x25, [sp, #24] // 8-byte Folded Reload
+ adds x2, x25, x2
+ str x2, [x0, #16]
+ adcs x2, x28, x7
+ ldr x7, [sp, #16] // 8-byte Folded Reload
+ adcs x7, x7, x20
+ ldr x20, [sp, #88] // 8-byte Folded Reload
+ adcs x20, x20, x23
+ ldr x23, [sp, #136] // 8-byte Folded Reload
+ adcs x23, x23, x24
+ adcs x24, xzr, xzr
+ adds x2, x2, x22
+ ldr x22, [sp, #8] // 8-byte Folded Reload
+ adcs x7, x7, x22
+ ldr x22, [sp, #80] // 8-byte Folded Reload
+ adcs x20, x20, x22
+ ldr x22, [sp, #128] // 8-byte Folded Reload
+ adcs x22, x23, x22
+ ldr x23, [sp, #160] // 8-byte Folded Reload
+ adcs x23, x24, x23
+ adds x16, x16, x2
+ str x16, [x0, #24]
+ adcs x16, x17, x7
+ adcs x17, x3, x20
+ adcs x2, x6, x22
+ ldr x3, [sp, #120] // 8-byte Folded Reload
+ adcs x3, x3, x23
+ adcs x6, xzr, xzr
+ adds x16, x16, x18
+ adcs x17, x17, x4
+ adcs x18, x2, x19
+ adcs x2, x3, x21
+ ldr x3, [sp, #56] // 8-byte Folded Reload
+ adcs x3, x6, x3
+ adds x15, x15, x16
+ str x15, [x0, #32]
+ adcs x12, x12, x17
+ adcs x11, x11, x18
+ adcs x9, x9, x2
+ adcs x8, x8, x3
+ adcs x15, xzr, xzr
+ adds x12, x12, x14
+ adcs x11, x11, x13
+ stp x12, x11, [x0, #40]
+ adcs x9, x9, x10
+ adcs x8, x8, x5
+ stp x9, x8, [x0, #56]
+ adcs x8, x15, x1
+ str x8, [x0, #72]
+ add sp, sp, #176 // =176
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end66:
+ .size mcl_fpDbl_mulPre5L, .Lfunc_end66-mcl_fpDbl_mulPre5L
+
+ .globl mcl_fpDbl_sqrPre5L
+ .align 2
+ .type mcl_fpDbl_sqrPre5L,@function
+mcl_fpDbl_sqrPre5L: // @mcl_fpDbl_sqrPre5L
+// BB#0:
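+// z[0..9] = x[0..4]^2: full double-width squaring, no modular reduction.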
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x1, #16]
+ ldp x12, x15, [x1]
+ ldp x13, x14, [x1, #24]
+ ldr x16, [x1, #16]
+ mul x17, x12, x12
+ mul x18, x14, x12
+ mul x2, x11, x12
+ umulh x3, x16, x12
+ mul x4, x16, x12
+ umulh x5, x9, x12
+ mul x6, x9, x12
+ str x17, [x0]
+ umulh x17, x12, x12
+ adds x17, x17, x6
+ adcs x4, x5, x4
+ adcs x2, x3, x2
+ umulh x3, x11, x12
+ adcs x18, x3, x18
+ umulh x12, x14, x12
+ adcs x12, x12, xzr
+ adds x17, x6, x17
+ ldr x3, [x1]
+ str x17, [x0, #8]
+ mul x17, x9, x9
+ adcs x17, x17, x4
+ mul x4, x16, x9
+ adcs x2, x4, x2
+ mul x4, x11, x9
+ adcs x18, x4, x18
+ mul x4, x14, x9
+ adcs x12, x4, x12
+ adcs x4, xzr, xzr
+ adds x17, x17, x5
+ umulh x5, x9, x9
+ adcs x2, x2, x5
+ umulh x5, x16, x9
+ adcs x18, x18, x5
+ ldr x5, [x1, #8]
+ umulh x11, x11, x9
+ adcs x11, x12, x11
+ ldr x12, [x1, #24]
+ umulh x9, x14, x9
+ adcs x9, x4, x9
+ mul x4, x3, x16
+ adds x17, x4, x17
+ mul x4, x14, x16
+ str x17, [x0, #16]
+ mul x17, x5, x16
+ adcs x17, x17, x2
+ mul x2, x16, x16
+ adcs x18, x2, x18
+ mul x2, x12, x16
+ adcs x11, x2, x11
+ umulh x2, x3, x16
+ adcs x9, x4, x9
+ adcs x4, xzr, xzr
+ adds x17, x17, x2
+ umulh x2, x5, x16
+ adcs x18, x18, x2
+ umulh x2, x16, x16
+ adcs x11, x11, x2
+ umulh x14, x14, x16
+ umulh x16, x12, x16
+ adcs x9, x9, x16
+ ldr x16, [x1, #32]
+ adcs x14, x4, x14
+ mul x1, x3, x12
+ adds x17, x1, x17
+ mul x1, x16, x12
+ str x17, [x0, #24]
+ mul x17, x5, x12
+ adcs x17, x17, x18
+ mul x18, x10, x12
+ adcs x11, x18, x11
+ mul x18, x12, x12
+ adcs x9, x18, x9
+ umulh x18, x16, x12
+ umulh x2, x3, x12
+ adcs x14, x1, x14
+ adcs x1, xzr, xzr
+ adds x17, x17, x2
+ umulh x2, x10, x12
+ umulh x3, x5, x12
+ umulh x12, x12, x12
+ adcs x11, x11, x3
+ mul x3, x8, x16
+ adcs x9, x9, x2
+ mul x2, x13, x16
+ adcs x12, x14, x12
+ mul x14, x10, x16
+ adcs x18, x1, x18
+ mul x1, x15, x16
+ adds x17, x17, x3
+ mul x3, x16, x16
+ umulh x8, x8, x16
+ umulh x15, x15, x16
+ umulh x10, x10, x16
+ umulh x13, x13, x16
+ umulh x16, x16, x16
+ str x17, [x0, #32]
+ adcs x11, x11, x1
+ adcs x9, x9, x14
+ adcs x12, x12, x2
+ adcs x14, x18, x3
+ adcs x17, xzr, xzr
+ adds x8, x11, x8
+ str x8, [x0, #40]
+ adcs x8, x9, x15
+ str x8, [x0, #48]
+ adcs x8, x12, x10
+ str x8, [x0, #56]
+ adcs x8, x14, x13
+ str x8, [x0, #64]
+ adcs x8, x17, x16
+ str x8, [x0, #72]
+ ret
+.Lfunc_end67:
+ .size mcl_fpDbl_sqrPre5L, .Lfunc_end67-mcl_fpDbl_sqrPre5L
+
+ .globl mcl_fp_mont5L
+ .align 2
+ .type mcl_fp_mont5L,@function
+mcl_fp_mont5L: // @mcl_fp_mont5L
+// BB#0:
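+// Montgomery multiplication for 5-limb (320-bit) moduli: z = x*y*R^-1 mod p with
+// R = 2^320. Reduction is interleaved with the multiplication one word at a time;
+// the word loaded from [x3, #-8] appears to be the precomputed -p^-1 mod 2^64.
+// A final conditional subtraction of p (csel on the borrow) keeps z in [0, p).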
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #80 // =80
+ str x0, [sp, #72] // 8-byte Folded Spill
+ ldp x16, x10, [x1, #24]
+ ldp x18, x0, [x1, #8]
+ ldr x17, [x1]
+ ldur x9, [x3, #-8]
+ str x9, [sp, #16] // 8-byte Folded Spill
+ ldp x11, x8, [x3, #24]
+ ldp x14, x12, [x3, #8]
+ ldr x13, [x3]
+ ldp x3, x1, [x2]
+ ldp x4, x5, [x2, #16]
+ ldr x2, [x2, #32]
+ umulh x6, x10, x3
+ mul x7, x10, x3
+ umulh x19, x16, x3
+ mul x20, x16, x3
+ umulh x21, x0, x3
+ mul x22, x0, x3
+ umulh x23, x18, x3
+ mul x24, x18, x3
+ umulh x25, x17, x3
+ mul x3, x17, x3
+ umulh x26, x1, x10
+ mul x27, x1, x10
+ umulh x28, x1, x16
+ adds x24, x25, x24
+ mul x25, x3, x9
+ adcs x22, x23, x22
+ mul x23, x25, x8
+ mul x29, x25, x11
+ mul x30, x25, x12
+ adcs x20, x21, x20
+ mul x21, x25, x14
+ adcs x7, x19, x7
+ umulh x19, x25, x13
+ adcs x6, x6, xzr
+ adds x19, x19, x21
+ umulh x21, x25, x14
+ adcs x21, x21, x30
+ umulh x30, x25, x12
+ adcs x29, x30, x29
+ umulh x30, x25, x11
+ adcs x23, x30, x23
+ umulh x30, x25, x8
+ mul x25, x25, x13
+ adcs x30, x30, xzr
+ cmn x25, x3
+ mul x3, x1, x16
+ umulh x25, x1, x0
+ adcs x19, x19, x24
+ mul x24, x1, x0
+ adcs x21, x21, x22
+ umulh x22, x1, x18
+ adcs x20, x29, x20
+ mul x29, x1, x18
+ adcs x7, x23, x7
+ umulh x23, x1, x17
+ mul x1, x1, x17
+ adcs x6, x30, x6
+ adcs x30, xzr, xzr
+ adds x23, x23, x29
+ umulh x29, x4, x10
+ adcs x22, x22, x24
+ mul x24, x4, x10
+ adcs x3, x25, x3
+ umulh x25, x4, x16
+ adcs x27, x28, x27
+ adcs x26, x26, xzr
+ adds x1, x19, x1
+ adcs x19, x21, x23
+ mul x21, x1, x9
+ adcs x20, x20, x22
+ mul x22, x21, x8
+ mul x23, x21, x11
+ mul x28, x21, x12
+ adcs x3, x7, x3
+ mul x7, x21, x14
+ adcs x6, x6, x27
+ umulh x27, x21, x13
+ adcs x26, x30, x26
+ adcs x30, xzr, xzr
+ adds x7, x27, x7
+ umulh x27, x21, x14
+ adcs x27, x27, x28
+ umulh x28, x21, x12
+ adcs x23, x28, x23
+ umulh x28, x21, x11
+ adcs x22, x28, x22
+ umulh x28, x21, x8
+ mul x21, x21, x13
+ adcs x28, x28, xzr
+ cmn x21, x1
+ mul x1, x4, x16
+ umulh x21, x4, x0
+ adcs x7, x7, x19
+ mul x19, x4, x0
+ adcs x20, x27, x20
+ umulh x27, x4, x18
+ adcs x3, x23, x3
+ mul x23, x4, x18
+ adcs x6, x22, x6
+ umulh x22, x4, x17
+ mul x4, x4, x17
+ adcs x26, x28, x26
+ umulh x15, x5, x10
+ str x15, [sp, #64] // 8-byte Folded Spill
+ adcs x30, x30, xzr
+ adds x22, x22, x23
+ mul x15, x5, x10
+ str x15, [sp, #56] // 8-byte Folded Spill
+ adcs x19, x27, x19
+ umulh x15, x5, x16
+ str x15, [sp, #40] // 8-byte Folded Spill
+ adcs x1, x21, x1
+ mul x15, x5, x16
+ str x15, [sp, #32] // 8-byte Folded Spill
+ adcs x24, x25, x24
+ adcs x25, x29, xzr
+ adds x4, x7, x4
+ adcs x7, x20, x22
+ mul x20, x4, x9
+ adcs x3, x3, x19
+ mul x19, x20, x8
+ mul x22, x20, x11
+ mov x15, x12
+ mul x29, x20, x15
+ adcs x1, x6, x1
+ mov x21, x14
+ mul x6, x20, x21
+ adcs x24, x26, x24
+ mov x9, x13
+ umulh x26, x20, x9
+ adcs x25, x30, x25
+ adcs x30, xzr, xzr
+ adds x6, x26, x6
+ umulh x26, x20, x21
+ adcs x26, x26, x29
+ umulh x29, x20, x15
+ adcs x22, x29, x22
+ umulh x29, x20, x11
+ mov x13, x11
+ adcs x19, x29, x19
+ umulh x29, x20, x8
+ mov x12, x8
+ mul x20, x20, x9
+ mov x14, x9
+ adcs x29, x29, xzr
+ cmn x20, x4
+ umulh x4, x5, x0
+ mul x20, x5, x0
+ umulh x11, x5, x18
+ mul x9, x5, x18
+ umulh x8, x5, x17
+ mul x5, x5, x17
+ umulh x23, x2, x10
+ str x23, [sp, #48] // 8-byte Folded Spill
+ mul x10, x2, x10
+ str x10, [sp, #24] // 8-byte Folded Spill
+ umulh x10, x2, x16
+ str x10, [sp, #8] // 8-byte Folded Spill
+ mul x28, x2, x16
+ umulh x27, x2, x0
+ mul x23, x2, x0
+ umulh x16, x2, x18
+ mul x18, x2, x18
+ umulh x0, x2, x17
+ mul x17, x2, x17
+ adcs x2, x6, x7
+ adcs x3, x26, x3
+ adcs x1, x22, x1
+ adcs x6, x19, x24
+ adcs x7, x29, x25
+ adcs x19, x30, xzr
+ adds x8, x8, x9
+ adcs x9, x11, x20
+ ldr x10, [sp, #32] // 8-byte Folded Reload
+ adcs x10, x4, x10
+ ldr x11, [sp, #56] // 8-byte Folded Reload
+ ldr x4, [sp, #40] // 8-byte Folded Reload
+ adcs x4, x4, x11
+ ldr x11, [sp, #64] // 8-byte Folded Reload
+ adcs x20, x11, xzr
+ adds x2, x2, x5
+ adcs x8, x3, x8
+ ldr x24, [sp, #16] // 8-byte Folded Reload
+ mul x3, x2, x24
+ adcs x9, x1, x9
+ mul x1, x3, x12
+ mul x5, x3, x13
+ mul x22, x3, x15
+ adcs x10, x6, x10
+ mul x6, x3, x21
+ adcs x4, x7, x4
+ umulh x7, x3, x14
+ adcs x19, x19, x20
+ adcs x20, xzr, xzr
+ adds x6, x7, x6
+ umulh x7, x3, x21
+ adcs x7, x7, x22
+ umulh x22, x3, x15
+ mov x25, x15
+ adcs x5, x22, x5
+ umulh x22, x3, x13
+ adcs x1, x22, x1
+ umulh x22, x3, x12
+ mul x3, x3, x14
+ adcs x22, x22, xzr
+ cmn x3, x2
+ adcs x8, x6, x8
+ adcs x9, x7, x9
+ adcs x10, x5, x10
+ adcs x1, x1, x4
+ adcs x2, x22, x19
+ adcs x3, x20, xzr
+ adds x11, x0, x18
+ adcs x15, x16, x23
+ adcs x16, x27, x28
+ ldr x18, [sp, #24] // 8-byte Folded Reload
+ ldr x0, [sp, #8] // 8-byte Folded Reload
+ adcs x18, x0, x18
+ ldr x0, [sp, #48] // 8-byte Folded Reload
+ adcs x4, x0, xzr
+ adds x8, x8, x17
+ adcs x9, x9, x11
+ mul x11, x8, x24
+ adcs x10, x10, x15
+ umulh x15, x11, x12
+ mul x17, x11, x12
+ umulh x5, x11, x13
+ mul x6, x11, x13
+ mov x0, x13
+ mov x20, x25
+ umulh x7, x11, x20
+ mul x19, x11, x20
+ mov x23, x20
+ mov x13, x21
+ umulh x20, x11, x13
+ mul x21, x11, x13
+ umulh x22, x11, x14
+ mul x11, x11, x14
+ adcs x16, x1, x16
+ adcs x18, x2, x18
+ adcs x1, x3, x4
+ adcs x2, xzr, xzr
+ adds x3, x22, x21
+ adcs x4, x20, x19
+ adcs x6, x7, x6
+ adcs x17, x5, x17
+ adcs x15, x15, xzr
+ cmn x11, x8
+ adcs x8, x3, x9
+ adcs x9, x4, x10
+ adcs x10, x6, x16
+ adcs x11, x17, x18
+ adcs x15, x15, x1
+ adcs x16, x2, xzr
+ subs x1, x8, x14
+ sbcs x13, x9, x13
+ sbcs x14, x10, x23
+ sbcs x17, x11, x0
+ sbcs x18, x15, x12
+ sbcs x16, x16, xzr
+ tst x16, #0x1
+ csel x8, x8, x1, ne
+ csel x9, x9, x13, ne
+ csel x10, x10, x14, ne
+ csel x11, x11, x17, ne
+ csel x12, x15, x18, ne
+ ldr x13, [sp, #72] // 8-byte Folded Reload
+ stp x8, x9, [x13]
+ stp x10, x11, [x13, #16]
+ str x12, [x13, #32]
+ add sp, sp, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end68:
+ .size mcl_fp_mont5L, .Lfunc_end68-mcl_fp_mont5L
+
+ .globl mcl_fp_montNF5L
+ .align 2
+ .type mcl_fp_montNF5L,@function
+mcl_fp_montNF5L: // @mcl_fp_montNF5L
+// BB#0:
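+// "NF" variant of the 5-limb Montgomery multiplication; presumably for moduli whose
+// top bit is clear, so no carry out of the top limb occurs and the final reduction
+// can select on the sign of the difference (asr/csel) instead of a borrow test.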
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #32 // =32
+ str x0, [sp, #24] // 8-byte Folded Spill
+ ldp x16, x14, [x1, #24]
+ ldp x18, x15, [x1, #8]
+ ldr x17, [x1]
+ ldur x13, [x3, #-8]
+ ldp x9, x8, [x3, #24]
+ ldp x11, x10, [x3, #8]
+ ldr x12, [x3]
+ ldp x1, x3, [x2]
+ ldp x4, x5, [x2, #16]
+ ldr x2, [x2, #32]
+ umulh x6, x14, x1
+ mul x7, x14, x1
+ umulh x19, x16, x1
+ mul x20, x16, x1
+ umulh x21, x15, x1
+ mul x22, x15, x1
+ umulh x23, x18, x1
+ mul x24, x18, x1
+ umulh x25, x17, x1
+ mul x1, x17, x1
+ umulh x26, x3, x14
+ mul x27, x3, x14
+ umulh x28, x3, x16
+ mul x29, x3, x16
+ umulh x30, x3, x15
+ adds x24, x25, x24
+ mul x25, x3, x15
+ adcs x22, x23, x22
+ umulh x23, x3, x18
+ adcs x20, x21, x20
+ mul x21, x1, x13
+ adcs x7, x19, x7
+ mul x19, x21, x12
+ adcs x6, x6, xzr
+ cmn x19, x1
+ mul x1, x3, x18
+ mul x19, x21, x11
+ adcs x19, x19, x24
+ mul x24, x21, x10
+ adcs x22, x24, x22
+ mul x24, x21, x9
+ adcs x20, x24, x20
+ mul x24, x21, x8
+ adcs x7, x24, x7
+ umulh x24, x21, x12
+ adcs x6, x6, xzr
+ adds x19, x19, x24
+ umulh x24, x21, x11
+ adcs x22, x22, x24
+ umulh x24, x21, x10
+ adcs x20, x20, x24
+ umulh x24, x21, x9
+ adcs x7, x7, x24
+ umulh x24, x3, x17
+ mul x3, x3, x17
+ umulh x21, x21, x8
+ adcs x6, x6, x21
+ umulh x21, x4, x14
+ adds x1, x24, x1
+ mul x24, x4, x14
+ adcs x23, x23, x25
+ umulh x25, x4, x16
+ adcs x29, x30, x29
+ mul x30, x4, x16
+ adcs x27, x28, x27
+ umulh x28, x4, x15
+ adcs x26, x26, xzr
+ adds x3, x3, x19
+ mul x19, x4, x15
+ adcs x1, x1, x22
+ umulh x22, x4, x18
+ adcs x20, x23, x20
+ mul x23, x4, x18
+ adcs x7, x29, x7
+ mul x29, x3, x13
+ adcs x6, x27, x6
+ mul x27, x29, x12
+ adcs x26, x26, xzr
+ cmn x27, x3
+ umulh x3, x4, x17
+ mul x4, x4, x17
+ mul x27, x29, x11
+ adcs x1, x27, x1
+ mul x27, x29, x10
+ adcs x20, x27, x20
+ mul x27, x29, x9
+ adcs x7, x27, x7
+ mul x27, x29, x8
+ adcs x6, x27, x6
+ umulh x27, x29, x12
+ adcs x26, x26, xzr
+ adds x1, x1, x27
+ umulh x27, x29, x11
+ adcs x20, x20, x27
+ umulh x27, x29, x10
+ adcs x7, x7, x27
+ umulh x27, x29, x9
+ adcs x6, x6, x27
+ umulh x27, x5, x14
+ umulh x29, x29, x8
+ adcs x26, x26, x29
+ mul x29, x5, x14
+ adds x3, x3, x23
+ umulh x23, x5, x16
+ adcs x19, x22, x19
+ mul x22, x5, x16
+ adcs x28, x28, x30
+ umulh x30, x5, x15
+ adcs x24, x25, x24
+ mul x25, x5, x15
+ adcs x21, x21, xzr
+ adds x1, x4, x1
+ umulh x4, x5, x18
+ adcs x3, x3, x20
+ mul x20, x5, x18
+ adcs x7, x19, x7
+ umulh x19, x5, x17
+ mul x5, x5, x17
+ adcs x6, x28, x6
+ mul x28, x1, x13
+ adcs x24, x24, x26
+ mul x26, x28, x12
+ adcs x21, x21, xzr
+ cmn x26, x1
+ umulh x0, x2, x14
+ mul x14, x2, x14
+ stp x14, x0, [sp, #8]
+ umulh x26, x2, x16
+ mul x1, x2, x16
+ umulh x0, x2, x15
+ mul x16, x2, x15
+ umulh x15, x2, x18
+ mul x18, x2, x18
+ umulh x14, x2, x17
+ mul x17, x2, x17
+ mul x2, x28, x11
+ adcs x2, x2, x3
+ mul x3, x28, x10
+ adcs x3, x3, x7
+ mul x7, x28, x9
+ adcs x6, x7, x6
+ mul x7, x28, x8
+ adcs x7, x7, x24
+ adcs x21, x21, xzr
+ umulh x24, x28, x12
+ adds x2, x2, x24
+ umulh x24, x28, x11
+ adcs x3, x3, x24
+ umulh x24, x28, x10
+ adcs x6, x6, x24
+ umulh x24, x28, x9
+ adcs x7, x7, x24
+ umulh x24, x28, x8
+ adcs x21, x21, x24
+ adds x19, x19, x20
+ adcs x4, x4, x25
+ adcs x20, x30, x22
+ adcs x22, x23, x29
+ adcs x23, x27, xzr
+ adds x2, x5, x2
+ adcs x3, x19, x3
+ mov x24, x13
+ mul x5, x2, x24
+ adcs x4, x4, x6
+ mul x6, x5, x8
+ mul x19, x5, x9
+ adcs x7, x20, x7
+ mul x20, x5, x10
+ adcs x21, x22, x21
+ mul x22, x5, x12
+ adcs x23, x23, xzr
+ cmn x22, x2
+ mul x2, x5, x11
+ umulh x22, x5, x8
+ adcs x2, x2, x3
+ umulh x3, x5, x9
+ adcs x4, x20, x4
+ umulh x20, x5, x10
+ adcs x7, x19, x7
+ umulh x19, x5, x11
+ umulh x5, x5, x12
+ adcs x6, x6, x21
+ adcs x21, x23, xzr
+ adds x2, x2, x5
+ adcs x4, x4, x19
+ adcs x5, x7, x20
+ adcs x3, x6, x3
+ adcs x6, x21, x22
+ adds x13, x14, x18
+ adcs x14, x15, x16
+ adcs x15, x0, x1
+ ldp x16, x18, [sp, #8]
+ adcs x16, x26, x16
+ adcs x18, x18, xzr
+ adds x17, x17, x2
+ adcs x13, x13, x4
+ mul x0, x17, x24
+ adcs x14, x14, x5
+ mul x1, x0, x8
+ mul x2, x0, x9
+ mul x4, x0, x10
+ mul x5, x0, x11
+ mul x7, x0, x12
+ umulh x19, x0, x8
+ umulh x20, x0, x9
+ umulh x21, x0, x10
+ umulh x22, x0, x11
+ umulh x0, x0, x12
+ adcs x15, x15, x3
+ adcs x16, x16, x6
+ adcs x18, x18, xzr
+ cmn x7, x17
+ adcs x13, x5, x13
+ adcs x14, x4, x14
+ adcs x15, x2, x15
+ adcs x16, x1, x16
+ adcs x17, x18, xzr
+ adds x13, x13, x0
+ adcs x14, x14, x22
+ adcs x15, x15, x21
+ adcs x16, x16, x20
+ adcs x17, x17, x19
+ subs x12, x13, x12
+ sbcs x11, x14, x11
+ sbcs x10, x15, x10
+ sbcs x9, x16, x9
+ sbcs x8, x17, x8
+ asr x18, x8, #63
+ cmp x18, #0 // =0
+ csel x12, x13, x12, lt
+ csel x11, x14, x11, lt
+ csel x10, x15, x10, lt
+ csel x9, x16, x9, lt
+ csel x8, x17, x8, lt
+ ldr x13, [sp, #24] // 8-byte Folded Reload
+ stp x12, x11, [x13]
+ stp x10, x9, [x13, #16]
+ str x8, [x13, #32]
+ add sp, sp, #32 // =32
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end69:
+ .size mcl_fp_montNF5L, .Lfunc_end69-mcl_fp_montNF5L
+
+ .globl mcl_fp_montRed5L
+ .align 2
+ .type mcl_fp_montRed5L,@function
+mcl_fp_montRed5L: // @mcl_fp_montRed5L
+// BB#0:
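+// Montgomery reduction: folds the 10-limb input at x1 down to z = t*R^-1 mod p
+// (5 limbs), using the constant loaded from [x2, #-8], followed by one conditional
+// subtraction of p.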
+ stp x26, x25, [sp, #-64]!
+ stp x24, x23, [sp, #16]
+ stp x22, x21, [sp, #32]
+ stp x20, x19, [sp, #48]
+ ldur x13, [x2, #-8]
+ ldp x9, x8, [x2, #24]
+ ldp x11, x10, [x2, #8]
+ ldr x12, [x2]
+ ldp x15, x16, [x1, #64]
+ ldp x17, x18, [x1, #48]
+ ldp x2, x3, [x1, #32]
+ ldp x4, x5, [x1, #16]
+ ldp x14, x1, [x1]
+ mul x6, x14, x13
+ umulh x7, x6, x8
+ mul x19, x6, x8
+ umulh x20, x6, x9
+ mul x21, x6, x9
+ umulh x22, x6, x10
+ mul x23, x6, x10
+ umulh x24, x6, x11
+ mul x25, x6, x11
+ umulh x26, x6, x12
+ mul x6, x6, x12
+ adds x25, x26, x25
+ adcs x23, x24, x23
+ adcs x21, x22, x21
+ adcs x19, x20, x19
+ adcs x7, x7, xzr
+ cmn x14, x6
+ adcs x14, x1, x25
+ adcs x1, x4, x23
+ mul x4, x14, x13
+ adcs x5, x5, x21
+ umulh x6, x4, x8
+ mul x20, x4, x8
+ umulh x21, x4, x9
+ mul x22, x4, x9
+ umulh x23, x4, x10
+ mul x24, x4, x10
+ umulh x25, x4, x11
+ mul x26, x4, x11
+ adcs x2, x2, x19
+ umulh x19, x4, x12
+ mul x4, x4, x12
+ adcs x3, x3, x7
+ adcs x17, x17, xzr
+ adcs x18, x18, xzr
+ adcs x15, x15, xzr
+ adcs x16, x16, xzr
+ adcs x7, xzr, xzr
+ adds x19, x19, x26
+ adcs x24, x25, x24
+ adcs x22, x23, x22
+ adcs x20, x21, x20
+ adcs x6, x6, xzr
+ cmn x4, x14
+ adcs x14, x19, x1
+ adcs x1, x24, x5
+ mul x4, x14, x13
+ adcs x2, x22, x2
+ umulh x5, x4, x8
+ mul x19, x4, x8
+ umulh x21, x4, x9
+ mul x22, x4, x9
+ umulh x23, x4, x10
+ mul x24, x4, x10
+ umulh x25, x4, x11
+ mul x26, x4, x11
+ adcs x3, x20, x3
+ umulh x20, x4, x12
+ mul x4, x4, x12
+ adcs x17, x6, x17
+ adcs x18, x18, xzr
+ adcs x15, x15, xzr
+ adcs x16, x16, xzr
+ adcs x6, x7, xzr
+ adds x7, x20, x26
+ adcs x20, x25, x24
+ adcs x22, x23, x22
+ adcs x19, x21, x19
+ adcs x5, x5, xzr
+ cmn x4, x14
+ adcs x14, x7, x1
+ adcs x1, x20, x2
+ mul x2, x14, x13
+ adcs x3, x22, x3
+ umulh x4, x2, x8
+ mul x7, x2, x8
+ umulh x20, x2, x9
+ mul x21, x2, x9
+ umulh x22, x2, x10
+ mul x23, x2, x10
+ umulh x24, x2, x11
+ mul x25, x2, x11
+ umulh x26, x2, x12
+ mul x2, x2, x12
+ adcs x17, x19, x17
+ adcs x18, x5, x18
+ adcs x15, x15, xzr
+ adcs x16, x16, xzr
+ adcs x5, x6, xzr
+ adds x6, x26, x25
+ adcs x19, x24, x23
+ adcs x21, x22, x21
+ adcs x7, x20, x7
+ adcs x4, x4, xzr
+ cmn x2, x14
+ adcs x14, x6, x1
+ adcs x1, x19, x3
+ mul x13, x14, x13
+ adcs x17, x21, x17
+ umulh x2, x13, x8
+ mul x3, x13, x8
+ umulh x6, x13, x9
+ mul x19, x13, x9
+ umulh x20, x13, x10
+ mul x21, x13, x10
+ umulh x22, x13, x11
+ mul x23, x13, x11
+ umulh x24, x13, x12
+ mul x13, x13, x12
+ adcs x18, x7, x18
+ adcs x15, x4, x15
+ adcs x16, x16, xzr
+ adcs x4, x5, xzr
+ adds x5, x24, x23
+ adcs x7, x22, x21
+ adcs x19, x20, x19
+ adcs x3, x6, x3
+ adcs x2, x2, xzr
+ cmn x13, x14
+ adcs x13, x5, x1
+ adcs x14, x7, x17
+ adcs x17, x19, x18
+ adcs x15, x3, x15
+ adcs x16, x2, x16
+ adcs x18, x4, xzr
+ subs x12, x13, x12
+ sbcs x11, x14, x11
+ sbcs x10, x17, x10
+ sbcs x9, x15, x9
+ sbcs x8, x16, x8
+ sbcs x18, x18, xzr
+ tst x18, #0x1
+ csel x12, x13, x12, ne
+ csel x11, x14, x11, ne
+ csel x10, x17, x10, ne
+ csel x9, x15, x9, ne
+ csel x8, x16, x8, ne
+ stp x12, x11, [x0]
+ stp x10, x9, [x0, #16]
+ str x8, [x0, #32]
+ ldp x20, x19, [sp, #48]
+ ldp x22, x21, [sp, #32]
+ ldp x24, x23, [sp, #16]
+ ldp x26, x25, [sp], #64
+ ret
+.Lfunc_end70:
+ .size mcl_fp_montRed5L, .Lfunc_end70-mcl_fp_montRed5L
+
+ .globl mcl_fp_addPre5L
+ .align 2
+ .type mcl_fp_addPre5L,@function
+mcl_fp_addPre5L: // @mcl_fp_addPre5L
+// BB#0:
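+// z[0..4] = x[0..4] + y[0..4] without reduction; the carry out is returned in x0.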
+ ldp x11, x8, [x2, #24]
+ ldp x17, x9, [x1, #24]
+ ldp x13, x10, [x2, #8]
+ ldr x12, [x2]
+ ldp x14, x15, [x1]
+ ldr x16, [x1, #16]
+ adds x12, x12, x14
+ str x12, [x0]
+ adcs x12, x13, x15
+ adcs x10, x10, x16
+ stp x12, x10, [x0, #8]
+ adcs x10, x11, x17
+ adcs x9, x8, x9
+ adcs x8, xzr, xzr
+ stp x10, x9, [x0, #24]
+ mov x0, x8
+ ret
+.Lfunc_end71:
+ .size mcl_fp_addPre5L, .Lfunc_end71-mcl_fp_addPre5L
+
+ .globl mcl_fp_subPre5L
+ .align 2
+ .type mcl_fp_subPre5L,@function
+mcl_fp_subPre5L: // @mcl_fp_subPre5L
+// BB#0:
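+// z[0..4] = x[0..4] - y[0..4] without reduction; the borrow (0 or 1) is returned in x0.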
+ ldp x11, x8, [x2, #24]
+ ldp x17, x9, [x1, #24]
+ ldp x13, x10, [x2, #8]
+ ldr x12, [x2]
+ ldp x14, x15, [x1]
+ ldr x16, [x1, #16]
+ subs x12, x14, x12
+ str x12, [x0]
+ sbcs x12, x15, x13
+ sbcs x10, x16, x10
+ stp x12, x10, [x0, #8]
+ sbcs x10, x17, x11
+ sbcs x9, x9, x8
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ stp x10, x9, [x0, #24]
+ mov x0, x8
+ ret
+.Lfunc_end72:
+ .size mcl_fp_subPre5L, .Lfunc_end72-mcl_fp_subPre5L
+
+ .globl mcl_fp_shr1_5L
+ .align 2
+ .type mcl_fp_shr1_5L,@function
+mcl_fp_shr1_5L: // @mcl_fp_shr1_5L
+// BB#0:
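+// z = x >> 1 across 5 limbs; extr pulls each bit down from the next-higher limb.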
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x1, #16]
+ ldr x12, [x1, #32]
+ extr x8, x9, x8, #1
+ extr x9, x10, x9, #1
+ extr x10, x11, x10, #1
+ extr x11, x12, x11, #1
+ lsr x12, x12, #1
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ str x12, [x0, #32]
+ ret
+.Lfunc_end73:
+ .size mcl_fp_shr1_5L, .Lfunc_end73-mcl_fp_shr1_5L
+
+ .globl mcl_fp_add5L
+ .align 2
+ .type mcl_fp_add5L,@function
+mcl_fp_add5L: // @mcl_fp_add5L
+// BB#0:
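+// Modular addition: z = x + y mod p. The raw sum is stored first, then p is
+// subtracted; if that subtraction does not borrow, the reduced value overwrites z.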
+ ldp x11, x8, [x2, #24]
+ ldp x17, x9, [x1, #24]
+ ldp x13, x10, [x2, #8]
+ ldr x12, [x2]
+ ldp x14, x15, [x1]
+ ldr x16, [x1, #16]
+ adds x12, x12, x14
+ ldr x14, [x3, #32]
+ adcs x13, x13, x15
+ adcs x10, x10, x16
+ ldp x15, x16, [x3]
+ stp x12, x13, [x0]
+ adcs x17, x11, x17
+ stp x10, x17, [x0, #16]
+ adcs x8, x8, x9
+ str x8, [x0, #32]
+ adcs x18, xzr, xzr
+ ldp x9, x1, [x3, #16]
+ subs x12, x12, x15
+ sbcs x11, x13, x16
+ sbcs x10, x10, x9
+ sbcs x9, x17, x1
+ sbcs x8, x8, x14
+ sbcs x13, x18, xzr
+ and w13, w13, #0x1
+ tbnz w13, #0, .LBB74_2
+// BB#1: // %nocarry
+ stp x12, x11, [x0]
+ stp x10, x9, [x0, #16]
+ str x8, [x0, #32]
+.LBB74_2: // %carry
+ ret
+.Lfunc_end74:
+ .size mcl_fp_add5L, .Lfunc_end74-mcl_fp_add5L
+
+ .globl mcl_fp_addNF5L
+ .align 2
+ .type mcl_fp_addNF5L,@function
+mcl_fp_addNF5L: // @mcl_fp_addNF5L
+// BB#0:
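+// Modular addition, branchless variant: x + y and (x + y) - p are both computed and
+// the result is chosen by the sign of the difference (csel) rather than by a branch.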
+ ldp x11, x8, [x1, #24]
+ ldp x17, x9, [x2, #24]
+ ldp x13, x10, [x1, #8]
+ ldr x12, [x1]
+ ldp x14, x15, [x2]
+ ldr x16, [x2, #16]
+ adds x12, x14, x12
+ ldp x18, x14, [x3, #24]
+ adcs x13, x15, x13
+ adcs x10, x16, x10
+ ldp x15, x16, [x3]
+ adcs x11, x17, x11
+ ldr x17, [x3, #16]
+ adcs x8, x9, x8
+ subs x9, x12, x15
+ sbcs x15, x13, x16
+ sbcs x16, x10, x17
+ sbcs x17, x11, x18
+ sbcs x14, x8, x14
+ asr x18, x14, #63
+ cmp x18, #0 // =0
+ csel x9, x12, x9, lt
+ csel x12, x13, x15, lt
+ csel x10, x10, x16, lt
+ csel x11, x11, x17, lt
+ csel x8, x8, x14, lt
+ stp x9, x12, [x0]
+ stp x10, x11, [x0, #16]
+ str x8, [x0, #32]
+ ret
+.Lfunc_end75:
+ .size mcl_fp_addNF5L, .Lfunc_end75-mcl_fp_addNF5L
+
+ .globl mcl_fp_sub5L
+ .align 2
+ .type mcl_fp_sub5L,@function
+mcl_fp_sub5L: // @mcl_fp_sub5L
+// BB#0:
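+// Modular subtraction: z = x - y; if the subtraction borrows, p is added back.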
+ ldp x11, x12, [x2, #24]
+ ldp x17, x13, [x1, #24]
+ ldp x9, x10, [x2, #8]
+ ldr x8, [x2]
+ ldp x14, x15, [x1]
+ ldr x16, [x1, #16]
+ subs x8, x14, x8
+ sbcs x9, x15, x9
+ stp x8, x9, [x0]
+ sbcs x10, x16, x10
+ sbcs x11, x17, x11
+ stp x10, x11, [x0, #16]
+ sbcs x12, x13, x12
+ str x12, [x0, #32]
+ ngcs x13, xzr
+ and w13, w13, #0x1
+ tbnz w13, #0, .LBB76_2
+// BB#1: // %nocarry
+ ret
+.LBB76_2: // %carry
+ ldp x17, x13, [x3, #24]
+ ldp x14, x15, [x3]
+ ldr x16, [x3, #16]
+ adds x8, x14, x8
+ adcs x9, x15, x9
+ adcs x10, x16, x10
+ adcs x11, x17, x11
+ adcs x12, x13, x12
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ str x12, [x0, #32]
+ ret
+.Lfunc_end76:
+ .size mcl_fp_sub5L, .Lfunc_end76-mcl_fp_sub5L
+
+ .globl mcl_fp_subNF5L
+ .align 2
+ .type mcl_fp_subNF5L,@function
+mcl_fp_subNF5L: // @mcl_fp_subNF5L
+// BB#0:
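+// Modular subtraction, branchless variant: p, masked by the sign of x - y, is added
+// back so no conditional branch is needed.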
+ ldp x11, x8, [x2, #24]
+ ldp x17, x9, [x1, #24]
+ ldp x13, x10, [x2, #8]
+ ldr x12, [x2]
+ ldp x14, x15, [x1]
+ ldr x16, [x1, #16]
+ subs x12, x14, x12
+ sbcs x13, x15, x13
+ ldp x1, x14, [x3, #8]
+ ldp x15, x18, [x3, #24]
+ sbcs x10, x16, x10
+ ldr x16, [x3]
+ sbcs x11, x17, x11
+ sbcs x8, x9, x8
+ asr x9, x8, #63
+ extr x17, x9, x8, #63
+ and x16, x17, x16
+ and x14, x14, x9, ror #63
+ and x15, x9, x15
+ and x17, x9, x18
+ ror x9, x9, #63
+ and x9, x9, x1
+ adds x12, x16, x12
+ adcs x9, x9, x13
+ stp x12, x9, [x0]
+ adcs x9, x14, x10
+ str x9, [x0, #16]
+ adcs x9, x15, x11
+ adcs x8, x17, x8
+ stp x9, x8, [x0, #24]
+ ret
+.Lfunc_end77:
+ .size mcl_fp_subNF5L, .Lfunc_end77-mcl_fp_subNF5L
+
+ .globl mcl_fpDbl_add5L
+ .align 2
+ .type mcl_fpDbl_add5L,@function
+mcl_fpDbl_add5L: // @mcl_fpDbl_add5L
+// BB#0:
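+// Double-width addition z[0..9] = x[0..9] + y[0..9]; the low 5 limbs are stored
+// as-is and the high 5 limbs are reduced mod p with one conditional subtraction.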
+ stp x22, x21, [sp, #-32]!
+ stp x20, x19, [sp, #16]
+ ldp x8, x9, [x2, #64]
+ ldp x10, x11, [x1, #64]
+ ldp x12, x13, [x2, #48]
+ ldp x14, x15, [x1, #48]
+ ldp x16, x17, [x2, #32]
+ ldp x18, x4, [x1, #32]
+ ldp x5, x6, [x2, #16]
+ ldp x19, x2, [x2]
+ ldp x20, x21, [x1, #16]
+ ldp x7, x1, [x1]
+ adds x7, x19, x7
+ ldr x19, [x3, #32]
+ str x7, [x0]
+ adcs x1, x2, x1
+ ldp x2, x7, [x3, #16]
+ str x1, [x0, #8]
+ ldp x1, x3, [x3]
+ adcs x5, x5, x20
+ str x5, [x0, #16]
+ adcs x5, x6, x21
+ adcs x16, x16, x18
+ stp x5, x16, [x0, #24]
+ adcs x16, x17, x4
+ adcs x12, x12, x14
+ adcs x13, x13, x15
+ adcs x8, x8, x10
+ adcs x9, x9, x11
+ adcs x10, xzr, xzr
+ subs x11, x16, x1
+ sbcs x14, x12, x3
+ sbcs x15, x13, x2
+ sbcs x17, x8, x7
+ sbcs x18, x9, x19
+ sbcs x10, x10, xzr
+ tst x10, #0x1
+ csel x10, x16, x11, ne
+ csel x11, x12, x14, ne
+ csel x12, x13, x15, ne
+ csel x8, x8, x17, ne
+ csel x9, x9, x18, ne
+ stp x10, x11, [x0, #40]
+ stp x12, x8, [x0, #56]
+ str x9, [x0, #72]
+ ldp x20, x19, [sp, #16]
+ ldp x22, x21, [sp], #32
+ ret
+.Lfunc_end78:
+ .size mcl_fpDbl_add5L, .Lfunc_end78-mcl_fpDbl_add5L
+
+ .globl mcl_fpDbl_sub5L
+ .align 2
+ .type mcl_fpDbl_sub5L,@function
+mcl_fpDbl_sub5L: // @mcl_fpDbl_sub5L
+// BB#0:
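+// Double-width subtraction z[0..9] = x[0..9] - y[0..9]; p is conditionally added to
+// the high 5 limbs to absorb the borrow.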
+ stp x22, x21, [sp, #-32]!
+ stp x20, x19, [sp, #16]
+ ldp x8, x9, [x2, #64]
+ ldp x10, x11, [x1, #64]
+ ldp x12, x13, [x2, #48]
+ ldp x14, x15, [x1, #48]
+ ldp x16, x17, [x2, #32]
+ ldp x18, x4, [x1, #32]
+ ldp x5, x6, [x2, #16]
+ ldp x7, x2, [x2]
+ ldp x20, x21, [x1, #16]
+ ldp x19, x1, [x1]
+ subs x7, x19, x7
+ ldr x19, [x3, #32]
+ str x7, [x0]
+ sbcs x1, x1, x2
+ ldp x2, x7, [x3, #16]
+ str x1, [x0, #8]
+ ldp x1, x3, [x3]
+ sbcs x5, x20, x5
+ str x5, [x0, #16]
+ sbcs x5, x21, x6
+ sbcs x16, x18, x16
+ stp x5, x16, [x0, #24]
+ sbcs x16, x4, x17
+ sbcs x12, x14, x12
+ sbcs x13, x15, x13
+ sbcs x8, x10, x8
+ sbcs x9, x11, x9
+ ngcs x10, xzr
+ tst x10, #0x1
+ csel x10, x19, xzr, ne
+ csel x11, x7, xzr, ne
+ csel x14, x2, xzr, ne
+ csel x15, x3, xzr, ne
+ csel x17, x1, xzr, ne
+ adds x16, x17, x16
+ adcs x12, x15, x12
+ stp x16, x12, [x0, #40]
+ adcs x12, x14, x13
+ adcs x8, x11, x8
+ stp x12, x8, [x0, #56]
+ adcs x8, x10, x9
+ str x8, [x0, #72]
+ ldp x20, x19, [sp, #16]
+ ldp x22, x21, [sp], #32
+ ret
+.Lfunc_end79:
+ .size mcl_fpDbl_sub5L, .Lfunc_end79-mcl_fpDbl_sub5L
+
+ .globl mcl_fp_mulUnitPre6L
+ .align 2
+ .type mcl_fp_mulUnitPre6L,@function
+mcl_fp_mulUnitPre6L: // @mcl_fp_mulUnitPre6L
+// BB#0:
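+// 6-limb analogue of mcl_fp_mulUnitPre5L: z[0..6] = x[0..5] * y.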
+ ldp x8, x9, [x1, #32]
+ ldp x10, x11, [x1]
+ ldp x12, x13, [x1, #16]
+ mul x14, x10, x2
+ mul x15, x11, x2
+ umulh x10, x10, x2
+ mul x16, x12, x2
+ umulh x11, x11, x2
+ mul x17, x13, x2
+ umulh x12, x12, x2
+ mul x18, x8, x2
+ umulh x13, x13, x2
+ mul x1, x9, x2
+ umulh x8, x8, x2
+ umulh x9, x9, x2
+ adds x10, x10, x15
+ stp x14, x10, [x0]
+ adcs x10, x11, x16
+ str x10, [x0, #16]
+ adcs x10, x12, x17
+ str x10, [x0, #24]
+ adcs x10, x13, x18
+ adcs x8, x8, x1
+ stp x10, x8, [x0, #32]
+ adcs x8, x9, xzr
+ str x8, [x0, #48]
+ ret
+.Lfunc_end80:
+ .size mcl_fp_mulUnitPre6L, .Lfunc_end80-mcl_fp_mulUnitPre6L
+
+ .globl mcl_fpDbl_mulPre6L
+ .align 2
+ .type mcl_fpDbl_mulPre6L,@function
+mcl_fpDbl_mulPre6L: // @mcl_fpDbl_mulPre6L
+// BB#0:
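+// 6-limb analogue of mcl_fpDbl_mulPre5L: full 12-limb product, no reduction.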
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #400 // =400
+ ldp x8, x9, [x1]
+ ldp x11, x13, [x1]
+ ldp x10, x17, [x1, #16]
+ ldp x12, x14, [x1, #32]
+ ldp x15, x16, [x2]
+ ldr x3, [x1, #32]
+ mul x30, x8, x15
+ umulh x18, x14, x15
+ str x18, [sp, #392] // 8-byte Folded Spill
+ mul x18, x14, x15
+ str x18, [sp, #384] // 8-byte Folded Spill
+ umulh x18, x12, x15
+ str x18, [sp, #376] // 8-byte Folded Spill
+ mul x18, x12, x15
+ str x18, [sp, #360] // 8-byte Folded Spill
+ umulh x18, x17, x15
+ str x18, [sp, #336] // 8-byte Folded Spill
+ mul x18, x17, x15
+ str x18, [sp, #312] // 8-byte Folded Spill
+ umulh x18, x10, x15
+ str x18, [sp, #304] // 8-byte Folded Spill
+ mul x18, x10, x15
+ str x18, [sp, #272] // 8-byte Folded Spill
+ umulh x18, x9, x15
+ str x18, [sp, #248] // 8-byte Folded Spill
+ mul x18, x9, x15
+ umulh x15, x8, x15
+ stp x15, x18, [sp, #216]
+ mul x15, x8, x16
+ str x15, [sp, #280] // 8-byte Folded Spill
+ mul x15, x14, x16
+ str x15, [sp, #352] // 8-byte Folded Spill
+ mul x15, x12, x16
+ str x15, [sp, #328] // 8-byte Folded Spill
+ mul x15, x17, x16
+ str x15, [sp, #296] // 8-byte Folded Spill
+ mul x15, x10, x16
+ str x15, [sp, #264] // 8-byte Folded Spill
+ mul x15, x9, x16
+ umulh x14, x14, x16
+ str x14, [sp, #368] // 8-byte Folded Spill
+ umulh x12, x12, x16
+ str x12, [sp, #344] // 8-byte Folded Spill
+ umulh x12, x17, x16
+ str x12, [sp, #320] // 8-byte Folded Spill
+ umulh x10, x10, x16
+ str x10, [sp, #288] // 8-byte Folded Spill
+ umulh x9, x9, x16
+ str x9, [sp, #256] // 8-byte Folded Spill
+ umulh x8, x8, x16
+ stp x8, x15, [sp, #232]
+ ldp x12, x8, [x2, #16]
+ ldr x9, [x1, #40]
+ ldp x15, x10, [x1, #16]
+ mul x14, x11, x12
+ str x14, [sp, #144] // 8-byte Folded Spill
+ mul x14, x9, x12
+ str x14, [sp, #200] // 8-byte Folded Spill
+ mul x14, x3, x12
+ str x14, [sp, #176] // 8-byte Folded Spill
+ mul x14, x10, x12
+ str x14, [sp, #160] // 8-byte Folded Spill
+ mul x14, x15, x12
+ str x14, [sp, #128] // 8-byte Folded Spill
+ mul x14, x13, x12
+ str x14, [sp, #112] // 8-byte Folded Spill
+ umulh x14, x9, x12
+ str x14, [sp, #208] // 8-byte Folded Spill
+ umulh x14, x3, x12
+ str x14, [sp, #192] // 8-byte Folded Spill
+ umulh x14, x10, x12
+ str x14, [sp, #168] // 8-byte Folded Spill
+ umulh x14, x15, x12
+ str x14, [sp, #152] // 8-byte Folded Spill
+ umulh x14, x13, x12
+ str x14, [sp, #120] // 8-byte Folded Spill
+ umulh x12, x11, x12
+ str x12, [sp, #104] // 8-byte Folded Spill
+ mul x12, x9, x8
+ str x12, [sp, #184] // 8-byte Folded Spill
+ umulh x9, x9, x8
+ str x9, [sp, #136] // 8-byte Folded Spill
+ mul x9, x3, x8
+ str x9, [sp, #80] // 8-byte Folded Spill
+ umulh x9, x3, x8
+ str x9, [sp, #96] // 8-byte Folded Spill
+ mul x9, x10, x8
+ str x9, [sp, #64] // 8-byte Folded Spill
+ umulh x9, x10, x8
+ str x9, [sp, #88] // 8-byte Folded Spill
+ mul x9, x15, x8
+ str x9, [sp, #48] // 8-byte Folded Spill
+ umulh x9, x15, x8
+ str x9, [sp, #72] // 8-byte Folded Spill
+ mul x9, x13, x8
+ str x9, [sp, #32] // 8-byte Folded Spill
+ umulh x9, x13, x8
+ str x9, [sp, #56] // 8-byte Folded Spill
+ mul x9, x11, x8
+ str x9, [sp, #24] // 8-byte Folded Spill
+ umulh x8, x11, x8
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldp x12, x13, [x1, #32]
+ ldp x9, x10, [x1]
+ ldp x11, x1, [x1, #16]
+ ldp x8, x2, [x2, #32]
+ mul x22, x9, x8
+ mul x28, x13, x8
+ mul x27, x12, x8
+ mul x24, x1, x8
+ mul x20, x11, x8
+ mul x19, x10, x8
+ umulh x14, x13, x8
+ str x14, [sp, #16] // 8-byte Folded Spill
+ umulh x29, x12, x8
+ umulh x26, x1, x8
+ umulh x23, x11, x8
+ umulh x21, x10, x8
+ umulh x7, x9, x8
+ mul x25, x9, x2
+ umulh x6, x9, x2
+ mul x4, x10, x2
+ umulh x5, x10, x2
+ mul x18, x11, x2
+ umulh x3, x11, x2
+ mul x16, x1, x2
+ umulh x1, x1, x2
+ mul x15, x12, x2
+ umulh x17, x12, x2
+ mul x14, x13, x2
+ umulh x13, x13, x2
+ str x30, [x0]
+ ldp x9, x8, [sp, #216]
+ adds x2, x9, x8
+ ldp x8, x30, [sp, #272]
+ ldr x9, [sp, #248] // 8-byte Folded Reload
+ adcs x8, x9, x8
+ ldp x10, x9, [sp, #304]
+ adcs x9, x10, x9
+ ldr x10, [sp, #360] // 8-byte Folded Reload
+ ldr x11, [sp, #336] // 8-byte Folded Reload
+ adcs x10, x11, x10
+ ldp x12, x11, [sp, #376]
+ adcs x11, x12, x11
+ ldr x12, [sp, #392] // 8-byte Folded Reload
+ adcs x12, x12, xzr
+ adds x2, x30, x2
+ str x2, [x0, #8]
+ ldp x30, x2, [sp, #232]
+ adcs x8, x2, x8
+ ldr x2, [sp, #264] // 8-byte Folded Reload
+ adcs x9, x2, x9
+ ldr x2, [sp, #296] // 8-byte Folded Reload
+ adcs x10, x2, x10
+ ldr x2, [sp, #328] // 8-byte Folded Reload
+ adcs x11, x2, x11
+ ldr x2, [sp, #352] // 8-byte Folded Reload
+ adcs x12, x2, x12
+ adcs x2, xzr, xzr
+ adds x8, x8, x30
+ ldr x30, [sp, #256] // 8-byte Folded Reload
+ adcs x9, x9, x30
+ ldr x30, [sp, #288] // 8-byte Folded Reload
+ adcs x10, x10, x30
+ ldr x30, [sp, #320] // 8-byte Folded Reload
+ adcs x11, x11, x30
+ ldr x30, [sp, #344] // 8-byte Folded Reload
+ adcs x12, x12, x30
+ ldr x30, [sp, #368] // 8-byte Folded Reload
+ adcs x2, x2, x30
+ ldr x30, [sp, #144] // 8-byte Folded Reload
+ adds x8, x30, x8
+ str x8, [x0, #16]
+ ldp x30, x8, [sp, #104]
+ adcs x8, x8, x9
+ ldr x9, [sp, #128] // 8-byte Folded Reload
+ adcs x9, x9, x10
+ ldr x10, [sp, #160] // 8-byte Folded Reload
+ adcs x10, x10, x11
+ ldr x11, [sp, #176] // 8-byte Folded Reload
+ adcs x11, x11, x12
+ ldr x12, [sp, #200] // 8-byte Folded Reload
+ adcs x12, x12, x2
+ adcs x2, xzr, xzr
+ adds x8, x8, x30
+ ldr x30, [sp, #120] // 8-byte Folded Reload
+ adcs x9, x9, x30
+ ldr x30, [sp, #152] // 8-byte Folded Reload
+ adcs x10, x10, x30
+ ldr x30, [sp, #168] // 8-byte Folded Reload
+ adcs x11, x11, x30
+ ldr x30, [sp, #192] // 8-byte Folded Reload
+ adcs x12, x12, x30
+ ldr x30, [sp, #208] // 8-byte Folded Reload
+ adcs x2, x2, x30
+ ldr x30, [sp, #24] // 8-byte Folded Reload
+ adds x8, x30, x8
+ str x8, [x0, #24]
+ ldp x8, x30, [sp, #32]
+ adcs x8, x8, x9
+ ldr x9, [sp, #48] // 8-byte Folded Reload
+ adcs x9, x9, x10
+ ldr x10, [sp, #64] // 8-byte Folded Reload
+ adcs x10, x10, x11
+ ldr x11, [sp, #80] // 8-byte Folded Reload
+ adcs x11, x11, x12
+ ldr x12, [sp, #184] // 8-byte Folded Reload
+ adcs x12, x12, x2
+ adcs x2, xzr, xzr
+ adds x8, x8, x30
+ ldr x30, [sp, #56] // 8-byte Folded Reload
+ adcs x9, x9, x30
+ ldr x30, [sp, #72] // 8-byte Folded Reload
+ adcs x10, x10, x30
+ ldr x30, [sp, #88] // 8-byte Folded Reload
+ adcs x11, x11, x30
+ ldr x30, [sp, #96] // 8-byte Folded Reload
+ adcs x12, x12, x30
+ ldr x30, [sp, #136] // 8-byte Folded Reload
+ adcs x2, x2, x30
+ adds x8, x22, x8
+ str x8, [x0, #32]
+ adcs x8, x19, x9
+ adcs x9, x20, x10
+ adcs x10, x24, x11
+ adcs x11, x27, x12
+ adcs x12, x28, x2
+ adcs x2, xzr, xzr
+ adds x8, x8, x7
+ adcs x9, x9, x21
+ adcs x10, x10, x23
+ adcs x11, x11, x26
+ adcs x12, x12, x29
+ ldr x7, [sp, #16] // 8-byte Folded Reload
+ adcs x2, x2, x7
+ adds x8, x25, x8
+ str x8, [x0, #40]
+ adcs x8, x4, x9
+ adcs x9, x18, x10
+ adcs x10, x16, x11
+ adcs x11, x15, x12
+ adcs x12, x14, x2
+ adcs x14, xzr, xzr
+ adds x8, x8, x6
+ str x8, [x0, #48]
+ adcs x8, x9, x5
+ str x8, [x0, #56]
+ adcs x8, x10, x3
+ str x8, [x0, #64]
+ adcs x8, x11, x1
+ str x8, [x0, #72]
+ adcs x8, x12, x17
+ str x8, [x0, #80]
+ adcs x8, x14, x13
+ str x8, [x0, #88]
+ add sp, sp, #400 // =400
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end81:
+ .size mcl_fpDbl_mulPre6L, .Lfunc_end81-mcl_fpDbl_mulPre6L
+
+ .globl mcl_fpDbl_sqrPre6L
+ .align 2
+ .type mcl_fpDbl_sqrPre6L,@function
+mcl_fpDbl_sqrPre6L: // @mcl_fpDbl_sqrPre6L
+// BB#0:
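+// 6-limb analogue of mcl_fpDbl_sqrPre5L: z[0..11] = x[0..5]^2, no reduction.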
+ stp x20, x19, [sp, #-16]!
+ ldp x8, x9, [x1, #8]
+ ldp x15, x10, [x1, #32]
+ ldp x11, x13, [x1]
+ ldr x12, [x1]
+ ldp x17, x14, [x1, #32]
+ ldr x16, [x1, #24]
+ mul x18, x11, x11
+ umulh x2, x10, x11
+ mul x3, x15, x11
+ mul x4, x16, x11
+ umulh x5, x9, x11
+ mul x6, x9, x11
+ umulh x7, x8, x11
+ mul x19, x8, x11
+ str x18, [x0]
+ umulh x18, x11, x11
+ adds x18, x18, x19
+ adcs x6, x7, x6
+ adcs x4, x5, x4
+ umulh x5, x16, x11
+ adcs x3, x5, x3
+ mul x5, x10, x11
+ umulh x11, x15, x11
+ adcs x11, x11, x5
+ adcs x2, x2, xzr
+ adds x18, x19, x18
+ ldp x5, x19, [x1, #16]
+ str x18, [x0, #8]
+ mul x18, x8, x8
+ adcs x18, x18, x6
+ mul x6, x9, x8
+ adcs x4, x6, x4
+ mul x6, x16, x8
+ adcs x3, x6, x3
+ mul x6, x15, x8
+ adcs x11, x6, x11
+ mul x6, x10, x8
+ adcs x2, x6, x2
+ adcs x6, xzr, xzr
+ adds x18, x18, x7
+ ldr x7, [x1, #32]
+ umulh x10, x10, x8
+ umulh x15, x15, x8
+ umulh x16, x16, x8
+ umulh x9, x9, x8
+ umulh x8, x8, x8
+ adcs x8, x4, x8
+ adcs x9, x3, x9
+ ldp x3, x4, [x1]
+ adcs x11, x11, x16
+ mul x16, x12, x5
+ adcs x15, x2, x15
+ mul x2, x14, x5
+ adcs x10, x6, x10
+ mul x6, x7, x5
+ adds x16, x16, x18
+ mul x18, x19, x5
+ str x16, [x0, #16]
+ mul x16, x13, x5
+ adcs x8, x16, x8
+ mul x16, x5, x5
+ adcs x9, x16, x9
+ umulh x16, x7, x5
+ adcs x11, x18, x11
+ adcs x15, x6, x15
+ umulh x6, x12, x5
+ adcs x10, x2, x10
+ adcs x2, xzr, xzr
+ adds x8, x8, x6
+ umulh x6, x13, x5
+ adcs x9, x9, x6
+ umulh x6, x5, x5
+ adcs x11, x11, x6
+ umulh x6, x19, x5
+ adcs x15, x15, x6
+ adcs x10, x10, x16
+ umulh x5, x14, x5
+ adcs x2, x2, x5
+ mul x5, x12, x19
+ adds x8, x5, x8
+ ldp x16, x5, [x1, #16]
+ ldr x1, [x1, #40]
+ str x8, [x0, #24]
+ mul x8, x13, x19
+ adcs x8, x8, x9
+ mul x9, x14, x19
+ adcs x11, x18, x11
+ mul x18, x19, x19
+ adcs x15, x18, x15
+ mul x18, x7, x19
+ umulh x14, x14, x19
+ umulh x7, x7, x19
+ umulh x13, x13, x19
+ umulh x12, x12, x19
+ umulh x19, x19, x19
+ adcs x10, x18, x10
+ mul x18, x3, x17
+ adcs x9, x9, x2
+ adcs x2, xzr, xzr
+ adds x8, x8, x12
+ mul x12, x1, x17
+ adcs x11, x11, x13
+ mul x13, x5, x17
+ adcs x15, x15, x6
+ mul x6, x16, x17
+ adcs x10, x10, x19
+ mul x19, x4, x17
+ adcs x9, x9, x7
+ mul x7, x17, x17
+ adcs x14, x2, x14
+ umulh x2, x1, x17
+ adds x8, x18, x8
+ umulh x18, x5, x17
+ str x8, [x0, #32]
+ umulh x8, x16, x17
+ adcs x11, x19, x11
+ umulh x19, x4, x17
+ adcs x15, x6, x15
+ umulh x6, x3, x17
+ umulh x17, x17, x17
+ adcs x10, x13, x10
+ mul x13, x3, x1
+ adcs x9, x7, x9
+ adcs x14, x12, x14
+ adcs x7, xzr, xzr
+ adds x11, x11, x6
+ mul x6, x5, x1
+ adcs x15, x15, x19
+ mul x19, x16, x1
+ adcs x8, x10, x8
+ mul x10, x4, x1
+ adcs x9, x9, x18
+ mul x18, x1, x1
+ umulh x3, x3, x1
+ umulh x4, x4, x1
+ umulh x16, x16, x1
+ umulh x5, x5, x1
+ umulh x1, x1, x1
+ adcs x14, x14, x17
+ adcs x17, x7, x2
+ adds x11, x13, x11
+ str x11, [x0, #40]
+ adcs x10, x10, x15
+ adcs x8, x19, x8
+ adcs x9, x6, x9
+ adcs x11, x12, x14
+ adcs x12, x18, x17
+ adcs x13, xzr, xzr
+ adds x10, x10, x3
+ adcs x8, x8, x4
+ stp x10, x8, [x0, #48]
+ adcs x8, x9, x16
+ str x8, [x0, #64]
+ adcs x8, x11, x5
+ str x8, [x0, #72]
+ adcs x8, x12, x2
+ str x8, [x0, #80]
+ adcs x8, x13, x1
+ str x8, [x0, #88]
+ ldp x20, x19, [sp], #16
+ ret
+.Lfunc_end82:
+ .size mcl_fpDbl_sqrPre6L, .Lfunc_end82-mcl_fpDbl_sqrPre6L
+
+ .globl mcl_fp_mont6L
+ .align 2
+ .type mcl_fp_mont6L,@function
+mcl_fp_mont6L: // @mcl_fp_mont6L
+// BB#0:
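+// 6-limb (384-bit) analogue of mcl_fp_mont5L: interleaved Montgomery multiplication
+// with a final conditional subtraction of p.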
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #48 // =48
+ str x0, [sp, #24] // 8-byte Folded Spill
+ ldr x5, [x2]
+ ldp x0, x4, [x1, #32]
+ ldp x16, x18, [x1, #16]
+ ldp x10, x1, [x1]
+ ldur x12, [x3, #-8]
+ str x12, [sp, #40] // 8-byte Folded Spill
+ ldp x11, x8, [x3, #32]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldp x13, x17, [x3, #16]
+ ldp x14, x15, [x3]
+ ldr x3, [x2, #8]
+ umulh x6, x4, x5
+ mul x7, x4, x5
+ umulh x19, x0, x5
+ mul x20, x0, x5
+ umulh x21, x18, x5
+ mul x22, x18, x5
+ umulh x23, x16, x5
+ mul x24, x16, x5
+ umulh x25, x1, x5
+ mul x26, x1, x5
+ umulh x27, x10, x5
+ mul x5, x10, x5
+ umulh x28, x3, x4
+ adds x26, x27, x26
+ mul x27, x5, x12
+ adcs x24, x25, x24
+ mul x25, x27, x8
+ mul x29, x27, x11
+ mul x30, x27, x17
+ adcs x22, x23, x22
+ mul x23, x27, x13
+ adcs x20, x21, x20
+ mul x21, x27, x15
+ adcs x7, x19, x7
+ umulh x19, x27, x14
+ adcs x6, x6, xzr
+ adds x19, x19, x21
+ umulh x21, x27, x15
+ adcs x21, x21, x23
+ umulh x23, x27, x13
+ adcs x23, x23, x30
+ umulh x30, x27, x17
+ adcs x29, x30, x29
+ umulh x30, x27, x11
+ adcs x25, x30, x25
+ umulh x30, x27, x8
+ mul x27, x27, x14
+ adcs x30, x30, xzr
+ cmn x27, x5
+ mul x5, x3, x4
+ umulh x27, x3, x0
+ adcs x19, x19, x26
+ mul x26, x3, x0
+ adcs x21, x21, x24
+ mul x24, x3, x18
+ adcs x22, x23, x22
+ mul x23, x3, x16
+ adcs x20, x29, x20
+ mul x29, x3, x1
+ adcs x7, x25, x7
+ umulh x25, x3, x10
+ adcs x30, x30, x6
+ adcs x6, xzr, xzr
+ adds x25, x25, x29
+ umulh x29, x3, x1
+ adcs x23, x29, x23
+ umulh x29, x3, x16
+ adcs x24, x29, x24
+ umulh x29, x3, x18
+ mul x3, x3, x10
+ adcs x26, x29, x26
+ adcs x27, x27, x5
+ adcs x29, x28, xzr
+ adds x3, x19, x3
+ adcs x5, x21, x25
+ mul x21, x3, x12
+ adcs x28, x22, x23
+ umulh x22, x21, x8
+ mul x23, x21, x8
+ mul x25, x21, x11
+ mul x9, x21, x17
+ adcs x19, x20, x24
+ mul x8, x21, x13
+ adcs x20, x7, x26
+ mul x24, x21, x15
+ adcs x30, x30, x27
+ umulh x26, x21, x14
+ adcs x6, x6, x29
+ adcs x7, xzr, xzr
+ adds x24, x26, x24
+ umulh x26, x21, x15
+ adcs x29, x26, x8
+ umulh x8, x21, x13
+ adcs x26, x8, x9
+ umulh x8, x21, x17
+ adcs x27, x8, x25
+ umulh x8, x21, x11
+ mul x9, x21, x14
+ adcs x8, x8, x23
+ adcs x21, x22, xzr
+ cmn x9, x3
+ ldp x23, x3, [x2, #16]
+ umulh x9, x23, x4
+ adcs x5, x24, x5
+ mul x22, x23, x4
+ adcs x24, x29, x28
+ mul x25, x23, x0
+ adcs x19, x26, x19
+ mul x26, x23, x18
+ adcs x20, x27, x20
+ mul x27, x23, x16
+ adcs x8, x8, x30
+ mul x28, x23, x1
+ adcs x21, x21, x6
+ umulh x6, x23, x10
+ adcs x7, x7, xzr
+ adds x6, x6, x28
+ umulh x28, x23, x1
+ adcs x27, x28, x27
+ umulh x28, x23, x16
+ adcs x26, x28, x26
+ umulh x28, x23, x18
+ adcs x25, x28, x25
+ umulh x28, x23, x0
+ mul x23, x23, x10
+ adcs x22, x28, x22
+ adcs x9, x9, xzr
+ adds x23, x5, x23
+ adcs x5, x24, x6
+ mul x29, x23, x12
+ adcs x6, x19, x27
+ ldr x12, [sp, #32] // 8-byte Folded Reload
+ mul x28, x29, x12
+ mul x27, x29, x11
+ mul x30, x29, x17
+ adcs x19, x20, x26
+ mul x26, x29, x13
+ adcs x20, x8, x25
+ mul x8, x29, x15
+ adcs x21, x21, x22
+ umulh x24, x29, x14
+ adcs x22, x7, x9
+ adcs x7, xzr, xzr
+ adds x24, x24, x8
+ umulh x8, x29, x15
+ adcs x25, x8, x26
+ umulh x8, x29, x13
+ adcs x26, x8, x30
+ umulh x8, x29, x17
+ adcs x27, x8, x27
+ umulh x8, x29, x11
+ adcs x28, x8, x28
+ umulh x8, x29, x12
+ mul x9, x29, x14
+ adcs x29, x8, xzr
+ cmn x9, x23
+ ldp x23, x8, [x2, #32]
+ umulh x30, x3, x4
+ adcs x2, x24, x5
+ mul x5, x3, x4
+ adcs x6, x25, x6
+ mul x24, x3, x0
+ adcs x19, x26, x19
+ mul x25, x3, x18
+ adcs x20, x27, x20
+ mul x26, x3, x16
+ adcs x21, x28, x21
+ mul x27, x3, x1
+ adcs x22, x29, x22
+ mov x9, x10
+ umulh x28, x3, x9
+ adcs x7, x7, xzr
+ adds x27, x28, x27
+ umulh x28, x3, x1
+ adcs x26, x28, x26
+ umulh x28, x3, x16
+ adcs x25, x28, x25
+ umulh x28, x3, x18
+ adcs x24, x28, x24
+ umulh x28, x3, x0
+ mul x3, x3, x9
+ adcs x5, x28, x5
+ adcs x29, x30, xzr
+ adds x2, x2, x3
+ adcs x3, x6, x27
+ ldr x10, [sp, #40] // 8-byte Folded Reload
+ mul x6, x2, x10
+ adcs x19, x19, x26
+ mul x26, x6, x12
+ mul x27, x6, x11
+ mov x30, x17
+ mul x28, x6, x30
+ adcs x20, x20, x25
+ mul x25, x6, x13
+ adcs x21, x21, x24
+ mov x17, x15
+ mul x24, x6, x17
+ adcs x5, x22, x5
+ umulh x22, x6, x14
+ adcs x29, x7, x29
+ adcs x7, xzr, xzr
+ adds x22, x22, x24
+ umulh x24, x6, x17
+ adcs x24, x24, x25
+ umulh x25, x6, x13
+ mov x15, x13
+ adcs x25, x25, x28
+ umulh x28, x6, x30
+ mov x13, x30
+ adcs x27, x28, x27
+ umulh x28, x6, x11
+ adcs x26, x28, x26
+ umulh x28, x6, x12
+ mul x6, x6, x14
+ adcs x28, x28, xzr
+ cmn x6, x2
+ umulh x2, x23, x4
+ mul x6, x23, x4
+ adcs x3, x22, x3
+ umulh x22, x23, x0
+ adcs x19, x24, x19
+ mul x24, x23, x0
+ adcs x20, x25, x20
+ mul x25, x23, x18
+ adcs x21, x27, x21
+ mul x27, x23, x16
+ adcs x5, x26, x5
+ mul x26, x23, x1
+ adcs x29, x28, x29
+ umulh x28, x23, x9
+ adcs x7, x7, xzr
+ adds x26, x28, x26
+ umulh x28, x23, x1
+ adcs x27, x28, x27
+ umulh x28, x23, x16
+ adcs x25, x28, x25
+ umulh x28, x23, x18
+ mul x23, x23, x9
+ adcs x24, x28, x24
+ umulh x28, x8, x4
+ str x28, [sp, #16] // 8-byte Folded Spill
+ mul x28, x8, x4
+ adcs x6, x22, x6
+ adcs x2, x2, xzr
+ adds x3, x3, x23
+ adcs x19, x19, x26
+ mul x22, x3, x10
+ adcs x20, x20, x27
+ mul x23, x22, x12
+ mul x26, x22, x11
+ mul x27, x22, x13
+ adcs x21, x21, x25
+ mul x25, x22, x15
+ adcs x5, x5, x24
+ mul x24, x22, x17
+ adcs x4, x29, x6
+ umulh x6, x22, x14
+ adcs x2, x7, x2
+ adcs x7, xzr, xzr
+ adds x6, x6, x24
+ umulh x24, x22, x17
+ adcs x24, x24, x25
+ umulh x25, x22, x15
+ adcs x25, x25, x27
+ umulh x27, x22, x13
+ adcs x26, x27, x26
+ umulh x27, x22, x11
+ adcs x23, x27, x23
+ umulh x27, x22, x12
+ mul x22, x22, x14
+ adcs x27, x27, xzr
+ cmn x22, x3
+ umulh x3, x8, x0
+ mul x0, x8, x0
+ umulh x22, x8, x18
+ mul x18, x8, x18
+ umulh x29, x8, x16
+ mul x16, x8, x16
+ umulh x30, x8, x1
+ mul x1, x8, x1
+ umulh x10, x8, x9
+ mul x8, x8, x9
+ adcs x6, x6, x19
+ adcs x19, x24, x20
+ adcs x20, x25, x21
+ adcs x5, x26, x5
+ adcs x9, x23, x4
+ str x9, [sp, #8] // 8-byte Folded Spill
+ adcs x2, x27, x2
+ adcs x7, x7, xzr
+ adds x9, x10, x1
+ adcs x16, x30, x16
+ adcs x18, x29, x18
+ adcs x0, x22, x0
+ adcs x1, x3, x28
+ ldr x10, [sp, #16] // 8-byte Folded Reload
+ adcs x3, x10, xzr
+ adds x8, x6, x8
+ adcs x9, x19, x9
+ ldr x10, [sp, #40] // 8-byte Folded Reload
+ mul x4, x8, x10
+ adcs x16, x20, x16
+ umulh x6, x4, x12
+ mul x19, x4, x12
+ mov x30, x11
+ umulh x20, x4, x30
+ mul x21, x4, x30
+ umulh x22, x4, x13
+ mul x23, x4, x13
+ mov x29, x13
+ umulh x24, x4, x15
+ mul x25, x4, x15
+ umulh x26, x4, x17
+ mul x27, x4, x17
+ umulh x28, x4, x14
+ mul x4, x4, x14
+ adcs x18, x5, x18
+ ldr x10, [sp, #8] // 8-byte Folded Reload
+ adcs x10, x10, x0
+ adcs x0, x2, x1
+ adcs x1, x7, x3
+ adcs x2, xzr, xzr
+ adds x3, x28, x27
+ adcs x5, x26, x25
+ adcs x7, x24, x23
+ adcs x21, x22, x21
+ adcs x19, x20, x19
+ adcs x6, x6, xzr
+ cmn x4, x8
+ adcs x8, x3, x9
+ adcs x9, x5, x16
+ adcs x16, x7, x18
+ adcs x10, x21, x10
+ adcs x18, x19, x0
+ adcs x0, x6, x1
+ adcs x1, x2, xzr
+ subs x13, x8, x14
+ sbcs x12, x9, x17
+ sbcs x11, x16, x15
+ sbcs x14, x10, x29
+ sbcs x15, x18, x30
+ ldr x17, [sp, #32] // 8-byte Folded Reload
+ sbcs x17, x0, x17
+ sbcs x1, x1, xzr
+ tst x1, #0x1
+ csel x8, x8, x13, ne
+ csel x9, x9, x12, ne
+ csel x11, x16, x11, ne
+ csel x10, x10, x14, ne
+ csel x12, x18, x15, ne
+ csel x13, x0, x17, ne
+ ldr x14, [sp, #24] // 8-byte Folded Reload
+ stp x8, x9, [x14]
+ stp x11, x10, [x14, #16]
+ stp x12, x13, [x14, #32]
+ add sp, sp, #48 // =48
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end83:
+ .size mcl_fp_mont6L, .Lfunc_end83-mcl_fp_mont6L
+
+ .globl mcl_fp_montNF6L
+ .align 2
+ .type mcl_fp_montNF6L,@function
+mcl_fp_montNF6L: // @mcl_fp_montNF6L
+// BB#0:
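+// 6-limb analogue of mcl_fp_montNF5L (sign-selected final reduction).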
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #112 // =112
+ str x0, [sp, #96] // 8-byte Folded Spill
+ ldp x16, x12, [x1, #32]
+ ldp x13, x11, [x1, #16]
+ ldp x17, x0, [x1]
+ ldur x18, [x3, #-8]
+ ldr x9, [x3, #32]
+ str x9, [sp, #104] // 8-byte Folded Spill
+ ldr x14, [x3, #40]
+ ldp x4, x10, [x3, #16]
+ ldr x15, [x3]
+ str x15, [sp, #8] // 8-byte Folded Spill
+ ldr x9, [x3, #8]
+ ldp x5, x3, [x2]
+ ldp x6, x7, [x2, #16]
+ ldp x19, x2, [x2, #32]
+ umulh x20, x12, x5
+ mul x21, x12, x5
+ umulh x22, x16, x5
+ mul x23, x16, x5
+ umulh x24, x11, x5
+ mul x25, x11, x5
+ mov x1, x13
+ umulh x26, x1, x5
+ mul x27, x1, x5
+ mov x13, x0
+ umulh x28, x13, x5
+ mul x29, x13, x5
+ mov x8, x17
+ umulh x30, x8, x5
+ mul x5, x8, x5
+ adds x29, x30, x29
+ mul x30, x3, x12
+ adcs x27, x28, x27
+ mul x28, x3, x16
+ adcs x25, x26, x25
+ mul x26, x3, x11
+ adcs x23, x24, x23
+ mul x24, x5, x18
+ adcs x21, x22, x21
+ mul x22, x24, x15
+ adcs x20, x20, xzr
+ cmn x22, x5
+ mul x5, x3, x1
+ mov x0, x9
+ mul x22, x24, x0
+ adcs x22, x22, x29
+ mul x29, x24, x4
+ adcs x17, x29, x27
+ mul x29, x24, x10
+ adcs x25, x29, x25
+ ldr x9, [sp, #104] // 8-byte Folded Reload
+ mul x29, x24, x9
+ adcs x23, x29, x23
+ mul x29, x24, x14
+ adcs x21, x29, x21
+ umulh x29, x24, x15
+ adcs x20, x20, xzr
+ adds x22, x22, x29
+ umulh x29, x24, x0
+ adcs x15, x17, x29
+ umulh x29, x24, x4
+ mov x17, x4
+ adcs x25, x25, x29
+ umulh x29, x24, x10
+ adcs x23, x23, x29
+ umulh x29, x24, x9
+ adcs x21, x21, x29
+ mul x29, x3, x13
+ umulh x24, x24, x14
+ adcs x20, x20, x24
+ umulh x24, x3, x8
+ adds x24, x24, x29
+ umulh x29, x3, x13
+ adcs x5, x29, x5
+ umulh x29, x3, x1
+ adcs x26, x29, x26
+ umulh x29, x3, x11
+ adcs x28, x29, x28
+ umulh x29, x3, x16
+ adcs x29, x29, x30
+ umulh x30, x3, x12
+ mul x3, x3, x8
+ adcs x30, x30, xzr
+ adds x3, x3, x22
+ umulh x22, x6, x12
+ adcs x24, x24, x15
+ mul x27, x6, x12
+ adcs x5, x5, x25
+ mul x25, x6, x16
+ adcs x23, x26, x23
+ mul x26, x6, x11
+ adcs x21, x28, x21
+ mul x28, x3, x18
+ mov x4, x18
+ adcs x20, x29, x20
+ ldr x18, [sp, #8] // 8-byte Folded Reload
+ mul x29, x28, x18
+ adcs x30, x30, xzr
+ cmn x29, x3
+ mul x3, x6, x1
+ mul x29, x28, x0
+ adcs x24, x29, x24
+ mul x29, x28, x17
+ adcs x5, x29, x5
+ mul x29, x28, x10
+ adcs x23, x29, x23
+ mul x29, x28, x9
+ adcs x21, x29, x21
+ mul x29, x28, x14
+ adcs x20, x29, x20
+ umulh x29, x28, x18
+ adcs x30, x30, xzr
+ adds x24, x24, x29
+ umulh x29, x28, x0
+ adcs x5, x5, x29
+ umulh x29, x28, x17
+ adcs x23, x23, x29
+ umulh x29, x28, x10
+ adcs x21, x21, x29
+ umulh x29, x28, x9
+ adcs x20, x20, x29
+ mul x29, x6, x13
+ umulh x28, x28, x14
+ adcs x28, x30, x28
+ umulh x30, x6, x8
+ adds x29, x30, x29
+ umulh x30, x6, x13
+ adcs x3, x30, x3
+ umulh x30, x6, x1
+ adcs x26, x30, x26
+ umulh x30, x6, x11
+ adcs x25, x30, x25
+ umulh x30, x6, x16
+ mul x6, x6, x8
+ adcs x27, x30, x27
+ umulh x30, x7, x12
+ adcs x22, x22, xzr
+ adds x6, x6, x24
+ mul x24, x7, x12
+ adcs x5, x29, x5
+ umulh x29, x7, x16
+ adcs x3, x3, x23
+ mul x23, x7, x16
+ adcs x21, x26, x21
+ mul x26, x7, x11
+ adcs x20, x25, x20
+ mul x25, x6, x4
+ adcs x27, x27, x28
+ mul x28, x25, x18
+ adcs x22, x22, xzr
+ cmn x28, x6
+ mul x6, x7, x1
+ mul x28, x25, x0
+ adcs x5, x28, x5
+ mul x28, x25, x17
+ adcs x3, x28, x3
+ mul x28, x25, x10
+ adcs x21, x28, x21
+ mul x28, x25, x9
+ adcs x20, x28, x20
+ mul x28, x25, x14
+ adcs x27, x28, x27
+ umulh x28, x25, x18
+ adcs x22, x22, xzr
+ adds x5, x5, x28
+ umulh x28, x25, x0
+ adcs x3, x3, x28
+ umulh x28, x25, x17
+ adcs x21, x21, x28
+ umulh x28, x25, x10
+ adcs x20, x20, x28
+ umulh x28, x25, x9
+ adcs x27, x27, x28
+ mul x28, x7, x13
+ umulh x25, x25, x14
+ adcs x22, x22, x25
+ umulh x25, x7, x8
+ adds x25, x25, x28
+ umulh x28, x7, x13
+ adcs x6, x28, x6
+ umulh x28, x7, x1
+ adcs x26, x28, x26
+ umulh x28, x7, x11
+ mul x7, x7, x8
+ adcs x23, x28, x23
+ umulh x9, x19, x12
+ str x9, [sp, #16] // 8-byte Folded Spill
+ adcs x24, x29, x24
+ mul x9, x19, x12
+ str x9, [sp, #32] // 8-byte Folded Spill
+ adcs x30, x30, xzr
+ adds x5, x7, x5
+ umulh x7, x19, x16
+ adcs x3, x25, x3
+ mul x25, x19, x16
+ adcs x6, x6, x21
+ umulh x21, x19, x11
+ adcs x20, x26, x20
+ mul x26, x19, x11
+ adcs x23, x23, x27
+ mul x27, x5, x4
+ adcs x22, x24, x22
+ mul x24, x27, x18
+ adcs x30, x30, xzr
+ cmn x24, x5
+ mov x28, x1
+ mul x5, x19, x28
+ mul x24, x19, x13
+ umulh x1, x19, x8
+ umulh x9, x19, x13
+ umulh x15, x19, x28
+ mul x19, x19, x8
+ umulh x29, x2, x12
+ str x29, [sp, #88] // 8-byte Folded Spill
+ mul x29, x2, x12
+ umulh x12, x2, x16
+ str x12, [sp, #80] // 8-byte Folded Spill
+ mul x12, x2, x16
+ str x12, [sp, #72] // 8-byte Folded Spill
+ umulh x12, x2, x11
+ mul x11, x2, x11
+ stp x11, x12, [sp, #56]
+ umulh x11, x2, x28
+ str x11, [sp, #48] // 8-byte Folded Spill
+ mul x11, x2, x28
+ str x11, [sp, #40] // 8-byte Folded Spill
+ umulh x11, x2, x13
+ str x11, [sp, #24] // 8-byte Folded Spill
+ mul x13, x2, x13
+ umulh x16, x2, x8
+ mul x28, x2, x8
+ mul x2, x27, x0
+ adcs x2, x2, x3
+ mul x3, x27, x17
+ adcs x3, x3, x6
+ mul x6, x27, x10
+ adcs x6, x6, x20
+ ldr x8, [sp, #104] // 8-byte Folded Reload
+ mul x20, x27, x8
+ adcs x20, x20, x23
+ mul x23, x27, x14
+ adcs x22, x23, x22
+ adcs x23, x30, xzr
+ umulh x30, x27, x18
+ adds x2, x2, x30
+ umulh x30, x27, x0
+ adcs x3, x3, x30
+ umulh x30, x27, x17
+ mov x12, x17
+ adcs x6, x6, x30
+ umulh x30, x27, x10
+ adcs x20, x20, x30
+ umulh x30, x27, x8
+ mov x11, x8
+ adcs x22, x22, x30
+ mov x30, x14
+ umulh x27, x27, x30
+ adcs x23, x23, x27
+ adds x8, x1, x24
+ adcs x9, x9, x5
+ adcs x14, x15, x26
+ adcs x5, x21, x25
+ ldr x15, [sp, #32] // 8-byte Folded Reload
+ adcs x7, x7, x15
+ ldr x15, [sp, #16] // 8-byte Folded Reload
+ adcs x21, x15, xzr
+ adds x2, x19, x2
+ adcs x8, x8, x3
+ adcs x9, x9, x6
+ mov x24, x4
+ mul x3, x2, x24
+ adcs x14, x14, x20
+ mul x6, x3, x30
+ adcs x5, x5, x22
+ mul x19, x3, x11
+ adcs x7, x7, x23
+ mul x20, x3, x18
+ adcs x21, x21, xzr
+ cmn x20, x2
+ mul x2, x3, x10
+ mul x20, x3, x0
+ adcs x8, x20, x8
+ mul x20, x3, x12
+ adcs x9, x20, x9
+ umulh x20, x3, x30
+ adcs x14, x2, x14
+ umulh x2, x3, x11
+ mov x27, x11
+ adcs x5, x19, x5
+ mov x11, x10
+ umulh x19, x3, x11
+ adcs x6, x6, x7
+ umulh x7, x3, x18
+ adcs x21, x21, xzr
+ adds x8, x8, x7
+ umulh x7, x3, x12
+ umulh x3, x3, x0
+ adcs x9, x9, x3
+ adcs x10, x14, x7
+ adcs x3, x5, x19
+ adcs x2, x6, x2
+ adcs x5, x21, x20
+ adds x15, x16, x13
+ ldr x13, [sp, #40] // 8-byte Folded Reload
+ ldr x14, [sp, #24] // 8-byte Folded Reload
+ adcs x16, x14, x13
+ ldp x14, x13, [sp, #48]
+ adcs x17, x14, x13
+ ldp x14, x13, [sp, #64]
+ adcs x1, x14, x13
+ ldr x13, [sp, #80] // 8-byte Folded Reload
+ adcs x4, x13, x29
+ ldr x13, [sp, #88] // 8-byte Folded Reload
+ adcs x6, x13, xzr
+ adds x8, x28, x8
+ adcs x9, x15, x9
+ mul x15, x8, x24
+ adcs x10, x16, x10
+ mul x16, x15, x30
+ mul x14, x15, x27
+ mul x7, x15, x11
+ mul x19, x15, x12
+ mul x20, x15, x0
+ mul x21, x15, x18
+ umulh x22, x15, x30
+ umulh x23, x15, x27
+ umulh x24, x15, x11
+ mov x28, x11
+ umulh x25, x15, x12
+ umulh x26, x15, x0
+ umulh x15, x15, x18
+ adcs x17, x17, x3
+ adcs x1, x1, x2
+ adcs x2, x4, x5
+ adcs x3, x6, xzr
+ cmn x21, x8
+ adcs x8, x20, x9
+ adcs x9, x19, x10
+ adcs x10, x7, x17
+ adcs x17, x14, x1
+ adcs x16, x16, x2
+ adcs x11, x3, xzr
+ adds x8, x8, x15
+ adcs x9, x9, x26
+ adcs x10, x10, x25
+ adcs x15, x17, x24
+ adcs x16, x16, x23
+ adcs x17, x11, x22
+ subs x3, x8, x18
+ sbcs x2, x9, x0
+ sbcs x11, x10, x12
+ sbcs x14, x15, x28
+ sbcs x18, x16, x27
+ sbcs x0, x17, x30
+ asr x1, x0, #63
+ cmp x1, #0 // =0
+ csel x8, x8, x3, lt
+ csel x9, x9, x2, lt
+ csel x10, x10, x11, lt
+ csel x11, x15, x14, lt
+ csel x12, x16, x18, lt
+ csel x13, x17, x0, lt
+ ldr x14, [sp, #96] // 8-byte Folded Reload
+ stp x8, x9, [x14]
+ stp x10, x11, [x14, #16]
+ stp x12, x13, [x14, #32]
+ add sp, sp, #112 // =112
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end84:
+ .size mcl_fp_montNF6L, .Lfunc_end84-mcl_fp_montNF6L
+
+ .globl mcl_fp_montRed6L
+ .align 2
+ .type mcl_fp_montRed6L,@function
+mcl_fp_montRed6L: // @mcl_fp_montRed6L
+// BB#0:
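+// 6-limb analogue of mcl_fp_montRed5L: reduces the 12-limb input at x1 to
+// z = t*R^-1 mod p.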
+ stp x26, x25, [sp, #-64]!
+ stp x24, x23, [sp, #16]
+ stp x22, x21, [sp, #32]
+ stp x20, x19, [sp, #48]
+ ldur x14, [x2, #-8]
+ ldp x9, x8, [x2, #32]
+ ldp x11, x10, [x2, #16]
+ ldp x13, x12, [x2]
+ ldp x16, x17, [x1, #80]
+ ldp x18, x2, [x1, #64]
+ ldp x3, x4, [x1, #48]
+ ldp x5, x6, [x1, #32]
+ ldp x7, x19, [x1, #16]
+ ldp x15, x1, [x1]
+ mul x20, x15, x14
+ mul x21, x20, x8
+ mul x22, x20, x9
+ mul x23, x20, x10
+ mul x24, x20, x11
+ mul x25, x20, x12
+ umulh x26, x20, x13
+ adds x25, x26, x25
+ umulh x26, x20, x12
+ adcs x24, x26, x24
+ umulh x26, x20, x11
+ adcs x23, x26, x23
+ umulh x26, x20, x10
+ adcs x22, x26, x22
+ umulh x26, x20, x9
+ adcs x21, x26, x21
+ umulh x26, x20, x8
+ mul x20, x20, x13
+ adcs x26, x26, xzr
+ cmn x15, x20
+ adcs x15, x1, x25
+ adcs x1, x7, x24
+ mul x7, x15, x14
+ adcs x19, x19, x23
+ mul x20, x7, x8
+ mul x23, x7, x9
+ mul x24, x7, x10
+ mul x25, x7, x11
+ adcs x5, x5, x22
+ mul x22, x7, x12
+ adcs x6, x6, x21
+ umulh x21, x7, x13
+ adcs x3, x3, x26
+ adcs x4, x4, xzr
+ adcs x18, x18, xzr
+ adcs x2, x2, xzr
+ adcs x16, x16, xzr
+ adcs x17, x17, xzr
+ adcs x26, xzr, xzr
+ adds x21, x21, x22
+ umulh x22, x7, x12
+ adcs x22, x22, x25
+ umulh x25, x7, x11
+ adcs x24, x25, x24
+ umulh x25, x7, x10
+ adcs x23, x25, x23
+ umulh x25, x7, x9
+ adcs x20, x25, x20
+ umulh x25, x7, x8
+ mul x7, x7, x13
+ adcs x25, x25, xzr
+ cmn x7, x15
+ adcs x15, x21, x1
+ adcs x1, x22, x19
+ mul x7, x15, x14
+ adcs x5, x24, x5
+ mul x19, x7, x8
+ mul x21, x7, x9
+ mul x22, x7, x10
+ adcs x6, x23, x6
+ mul x23, x7, x11
+ adcs x3, x20, x3
+ mul x20, x7, x12
+ adcs x4, x25, x4
+ umulh x24, x7, x13
+ adcs x18, x18, xzr
+ adcs x2, x2, xzr
+ adcs x16, x16, xzr
+ adcs x17, x17, xzr
+ adcs x25, x26, xzr
+ adds x20, x24, x20
+ umulh x24, x7, x12
+ adcs x23, x24, x23
+ umulh x24, x7, x11
+ adcs x22, x24, x22
+ umulh x24, x7, x10
+ adcs x21, x24, x21
+ umulh x24, x7, x9
+ adcs x19, x24, x19
+ umulh x24, x7, x8
+ mul x7, x7, x13
+ adcs x24, x24, xzr
+ cmn x7, x15
+ adcs x15, x20, x1
+ adcs x1, x23, x5
+ mul x5, x15, x14
+ adcs x6, x22, x6
+ mul x7, x5, x8
+ mul x20, x5, x9
+ mul x22, x5, x10
+ adcs x3, x21, x3
+ mul x21, x5, x11
+ adcs x4, x19, x4
+ mul x19, x5, x12
+ adcs x18, x24, x18
+ umulh x23, x5, x13
+ adcs x2, x2, xzr
+ adcs x16, x16, xzr
+ adcs x17, x17, xzr
+ adcs x24, x25, xzr
+ adds x19, x23, x19
+ umulh x23, x5, x12
+ adcs x21, x23, x21
+ umulh x23, x5, x11
+ adcs x22, x23, x22
+ umulh x23, x5, x10
+ adcs x20, x23, x20
+ umulh x23, x5, x9
+ adcs x7, x23, x7
+ umulh x23, x5, x8
+ mul x5, x5, x13
+ adcs x23, x23, xzr
+ cmn x5, x15
+ adcs x15, x19, x1
+ adcs x1, x21, x6
+ mul x5, x15, x14
+ adcs x3, x22, x3
+ mul x6, x5, x8
+ mul x19, x5, x9
+ mul x21, x5, x10
+ adcs x4, x20, x4
+ mul x20, x5, x11
+ adcs x18, x7, x18
+ mul x7, x5, x12
+ adcs x2, x23, x2
+ umulh x22, x5, x13
+ adcs x16, x16, xzr
+ adcs x17, x17, xzr
+ adcs x23, x24, xzr
+ adds x7, x22, x7
+ umulh x22, x5, x12
+ adcs x20, x22, x20
+ umulh x22, x5, x11
+ adcs x21, x22, x21
+ umulh x22, x5, x10
+ adcs x19, x22, x19
+ umulh x22, x5, x9
+ adcs x6, x22, x6
+ umulh x22, x5, x8
+ mul x5, x5, x13
+ adcs x22, x22, xzr
+ cmn x5, x15
+ adcs x15, x7, x1
+ adcs x1, x20, x3
+ mul x14, x15, x14
+ adcs x3, x21, x4
+ mul x4, x14, x8
+ mul x5, x14, x9
+ mul x7, x14, x10
+ adcs x18, x19, x18
+ mul x19, x14, x11
+ adcs x2, x6, x2
+ mul x6, x14, x12
+ adcs x16, x22, x16
+ umulh x20, x14, x13
+ adcs x17, x17, xzr
+ adcs x21, x23, xzr
+ adds x6, x20, x6
+ umulh x20, x14, x12
+ adcs x19, x20, x19
+ umulh x20, x14, x11
+ adcs x7, x20, x7
+ umulh x20, x14, x10
+ adcs x5, x20, x5
+ umulh x20, x14, x9
+ adcs x4, x20, x4
+ umulh x20, x14, x8
+ mul x14, x14, x13
+ adcs x20, x20, xzr
+ cmn x14, x15
+ adcs x14, x6, x1
+ adcs x15, x19, x3
+ adcs x18, x7, x18
+ adcs x1, x5, x2
+ adcs x16, x4, x16
+ adcs x17, x20, x17
+ adcs x2, x21, xzr
+ subs x13, x14, x13
+ sbcs x12, x15, x12
+ sbcs x11, x18, x11
+ sbcs x10, x1, x10
+ sbcs x9, x16, x9
+ sbcs x8, x17, x8
+ sbcs x2, x2, xzr
+ tst x2, #0x1
+ csel x13, x14, x13, ne
+ csel x12, x15, x12, ne
+ csel x11, x18, x11, ne
+ csel x10, x1, x10, ne
+ csel x9, x16, x9, ne
+ csel x8, x17, x8, ne
+ stp x13, x12, [x0]
+ stp x11, x10, [x0, #16]
+ stp x9, x8, [x0, #32]
+ ldp x20, x19, [sp, #48]
+ ldp x22, x21, [sp, #32]
+ ldp x24, x23, [sp, #16]
+ ldp x26, x25, [sp], #64
+ ret
+.Lfunc_end85:
+ .size mcl_fp_montRed6L, .Lfunc_end85-mcl_fp_montRed6L
+
+ .globl mcl_fp_addPre6L
+ .align 2
+ .type mcl_fp_addPre6L,@function
+mcl_fp_addPre6L: // @mcl_fp_addPre6L
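+// 6-limb add without reduction: stores [x1] + [x2] at x0 and returns the carry-out in x0.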
+// BB#0:
+ ldp x8, x9, [x2, #32]
+ ldp x10, x11, [x1, #32]
+ ldp x12, x13, [x2, #16]
+ ldp x14, x15, [x2]
+ ldp x16, x17, [x1]
+ ldp x18, x1, [x1, #16]
+ adds x14, x14, x16
+ str x14, [x0]
+ adcs x14, x15, x17
+ adcs x12, x12, x18
+ stp x14, x12, [x0, #8]
+ adcs x12, x13, x1
+ adcs x8, x8, x10
+ stp x12, x8, [x0, #24]
+ adcs x9, x9, x11
+ adcs x8, xzr, xzr
+ str x9, [x0, #40]
+ mov x0, x8
+ ret
+.Lfunc_end86:
+ .size mcl_fp_addPre6L, .Lfunc_end86-mcl_fp_addPre6L
+
+ .globl mcl_fp_subPre6L
+ .align 2
+ .type mcl_fp_subPre6L,@function
+mcl_fp_subPre6L: // @mcl_fp_subPre6L
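+// 6-limb subtract without reduction: stores [x1] - [x2] at x0 and returns the borrow in x0.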
+// BB#0:
+ ldp x8, x9, [x2, #32]
+ ldp x10, x11, [x1, #32]
+ ldp x12, x13, [x2, #16]
+ ldp x14, x15, [x2]
+ ldp x16, x17, [x1]
+ ldp x18, x1, [x1, #16]
+ subs x14, x16, x14
+ str x14, [x0]
+ sbcs x14, x17, x15
+ sbcs x12, x18, x12
+ stp x14, x12, [x0, #8]
+ sbcs x12, x1, x13
+ sbcs x8, x10, x8
+ stp x12, x8, [x0, #24]
+ sbcs x9, x11, x9
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ str x9, [x0, #40]
+ mov x0, x8
+ ret
+.Lfunc_end87:
+ .size mcl_fp_subPre6L, .Lfunc_end87-mcl_fp_subPre6L
+
+ .globl mcl_fp_shr1_6L
+ .align 2
+ .type mcl_fp_shr1_6L,@function
+mcl_fp_shr1_6L: // @mcl_fp_shr1_6L
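+// Shifts the 6-limb value at x1 right by one bit and stores the result at x0.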
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x1, #16]
+ ldp x12, x13, [x1, #32]
+ extr x8, x9, x8, #1
+ extr x9, x10, x9, #1
+ extr x10, x11, x10, #1
+ extr x11, x12, x11, #1
+ extr x12, x13, x12, #1
+ lsr x13, x13, #1
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ stp x12, x13, [x0, #32]
+ ret
+.Lfunc_end88:
+ .size mcl_fp_shr1_6L, .Lfunc_end88-mcl_fp_shr1_6L
+
+ .globl mcl_fp_add6L
+ .align 2
+ .type mcl_fp_add6L,@function
+mcl_fp_add6L: // @mcl_fp_add6L
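+// Modular add: stores [x1] + [x2] at x0, then overwrites it with the sum minus the modulus at x3
+// when that trial subtraction does not borrow.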
+// BB#0:
+ ldp x8, x9, [x2, #32]
+ ldp x10, x11, [x1, #32]
+ ldp x12, x13, [x2, #16]
+ ldp x14, x15, [x2]
+ ldp x16, x17, [x1]
+ ldp x18, x1, [x1, #16]
+ adds x14, x14, x16
+ adcs x15, x15, x17
+ ldp x16, x17, [x3, #32]
+ adcs x18, x12, x18
+ adcs x1, x13, x1
+ ldp x12, x2, [x3]
+ stp x14, x15, [x0]
+ stp x18, x1, [x0, #16]
+ adcs x8, x8, x10
+ adcs x4, x9, x11
+ stp x8, x4, [x0, #32]
+ adcs x5, xzr, xzr
+ ldp x9, x10, [x3, #16]
+ subs x13, x14, x12
+ sbcs x12, x15, x2
+ sbcs x11, x18, x9
+ sbcs x10, x1, x10
+ sbcs x9, x8, x16
+ sbcs x8, x4, x17
+ sbcs x14, x5, xzr
+ and w14, w14, #0x1
+ tbnz w14, #0, .LBB89_2
+// BB#1: // %nocarry
+ stp x13, x12, [x0]
+ stp x11, x10, [x0, #16]
+ stp x9, x8, [x0, #32]
+.LBB89_2: // %carry
+ ret
+.Lfunc_end89:
+ .size mcl_fp_add6L, .Lfunc_end89-mcl_fp_add6L
+
+ .globl mcl_fp_addNF6L
+ .align 2
+ .type mcl_fp_addNF6L,@function
+mcl_fp_addNF6L: // @mcl_fp_addNF6L
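+// Modular add, branch-free: computes [x1] + [x2], subtracts the modulus at x3, and selects the
+// reduced or unreduced value with csel based on the sign of the trial subtraction.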
+// BB#0:
+ ldp x8, x9, [x1, #32]
+ ldp x10, x11, [x2, #32]
+ ldp x12, x13, [x1, #16]
+ ldp x14, x15, [x1]
+ ldp x16, x17, [x2]
+ ldp x18, x1, [x2, #16]
+ adds x14, x16, x14
+ adcs x15, x17, x15
+ ldp x16, x17, [x3, #32]
+ adcs x12, x18, x12
+ adcs x13, x1, x13
+ ldp x18, x1, [x3]
+ adcs x8, x10, x8
+ ldp x10, x2, [x3, #16]
+ adcs x9, x11, x9
+ subs x11, x14, x18
+ sbcs x18, x15, x1
+ sbcs x10, x12, x10
+ sbcs x1, x13, x2
+ sbcs x16, x8, x16
+ sbcs x17, x9, x17
+ asr x2, x17, #63
+ cmp x2, #0 // =0
+ csel x11, x14, x11, lt
+ csel x14, x15, x18, lt
+ csel x10, x12, x10, lt
+ csel x12, x13, x1, lt
+ csel x8, x8, x16, lt
+ csel x9, x9, x17, lt
+ stp x11, x14, [x0]
+ stp x10, x12, [x0, #16]
+ stp x8, x9, [x0, #32]
+ ret
+.Lfunc_end90:
+ .size mcl_fp_addNF6L, .Lfunc_end90-mcl_fp_addNF6L
+
+ .globl mcl_fp_sub6L
+ .align 2
+ .type mcl_fp_sub6L,@function
+mcl_fp_sub6L: // @mcl_fp_sub6L
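+// Modular subtract: computes [x1] - [x2]; when the subtraction borrows, adds the modulus at x3 back in.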
+// BB#0:
+ ldp x12, x13, [x2, #32]
+ ldp x14, x15, [x1, #32]
+ ldp x10, x11, [x2, #16]
+ ldp x8, x9, [x2]
+ ldp x16, x17, [x1]
+ ldp x18, x1, [x1, #16]
+ subs x8, x16, x8
+ sbcs x9, x17, x9
+ stp x8, x9, [x0]
+ sbcs x10, x18, x10
+ sbcs x11, x1, x11
+ stp x10, x11, [x0, #16]
+ sbcs x12, x14, x12
+ sbcs x13, x15, x13
+ stp x12, x13, [x0, #32]
+ ngcs x14, xzr
+ and w14, w14, #0x1
+ tbnz w14, #0, .LBB91_2
+// BB#1: // %nocarry
+ ret
+.LBB91_2: // %carry
+ ldp x14, x15, [x3, #32]
+ ldp x16, x17, [x3]
+ ldp x18, x1, [x3, #16]
+ adds x8, x16, x8
+ adcs x9, x17, x9
+ adcs x10, x18, x10
+ adcs x11, x1, x11
+ adcs x12, x14, x12
+ adcs x13, x15, x13
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ stp x12, x13, [x0, #32]
+ ret
+.Lfunc_end91:
+ .size mcl_fp_sub6L, .Lfunc_end91-mcl_fp_sub6L
+
+ .globl mcl_fp_subNF6L
+ .align 2
+ .type mcl_fp_subNF6L,@function
+mcl_fp_subNF6L: // @mcl_fp_subNF6L
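+// Modular subtract, branch-free: computes [x1] - [x2] and adds the modulus at x3 masked by the
+// sign of the result.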
+// BB#0:
+ ldp x8, x9, [x2, #32]
+ ldp x10, x11, [x1, #32]
+ ldp x12, x13, [x2, #16]
+ ldp x14, x18, [x2]
+ ldp x16, x17, [x1, #16]
+ ldp x15, x1, [x1]
+ subs x14, x15, x14
+ ldp x15, x2, [x3, #32]
+ sbcs x18, x1, x18
+ sbcs x12, x16, x12
+ ldp x16, x1, [x3, #16]
+ sbcs x13, x17, x13
+ ldp x17, x3, [x3]
+ sbcs x8, x10, x8
+ sbcs x9, x11, x9
+ asr x10, x9, #63
+ adds x11, x10, x10
+ and x16, x10, x16
+ and x1, x10, x1
+ and x15, x10, x15
+ and x2, x10, x2
+ adcs x10, x10, x10
+ orr x11, x11, x9, lsr #63
+ and x11, x11, x17
+ and x10, x10, x3
+ adds x11, x11, x14
+ adcs x10, x10, x18
+ stp x11, x10, [x0]
+ adcs x10, x16, x12
+ str x10, [x0, #16]
+ adcs x10, x1, x13
+ adcs x8, x15, x8
+ stp x10, x8, [x0, #24]
+ adcs x8, x2, x9
+ str x8, [x0, #40]
+ ret
+.Lfunc_end92:
+ .size mcl_fp_subNF6L, .Lfunc_end92-mcl_fp_subNF6L
+
+ .globl mcl_fpDbl_add6L
+ .align 2
+ .type mcl_fpDbl_add6L,@function
+mcl_fpDbl_add6L: // @mcl_fpDbl_add6L
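+// Double-width add: sums the 12-limb values at x1 and x2; the lower half is stored as-is and the
+// upper half is conditionally reduced modulo the prime at x3.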
+// BB#0:
+ stp x26, x25, [sp, #-64]!
+ stp x24, x23, [sp, #16]
+ stp x22, x21, [sp, #32]
+ stp x20, x19, [sp, #48]
+ ldp x8, x9, [x2, #80]
+ ldp x10, x11, [x1, #80]
+ ldp x12, x13, [x2, #64]
+ ldp x14, x15, [x1, #64]
+ ldp x16, x17, [x2, #48]
+ ldp x18, x4, [x1, #48]
+ ldp x5, x6, [x2, #32]
+ ldp x7, x19, [x1, #32]
+ ldp x20, x21, [x2, #16]
+ ldp x23, x2, [x2]
+ ldp x24, x25, [x1, #16]
+ ldp x22, x1, [x1]
+ adds x22, x23, x22
+ str x22, [x0]
+ ldp x22, x23, [x3, #32]
+ adcs x1, x2, x1
+ str x1, [x0, #8]
+ ldp x1, x2, [x3, #16]
+ adcs x20, x20, x24
+ ldp x24, x3, [x3]
+ str x20, [x0, #16]
+ adcs x20, x21, x25
+ adcs x5, x5, x7
+ stp x20, x5, [x0, #24]
+ adcs x5, x6, x19
+ str x5, [x0, #40]
+ adcs x16, x16, x18
+ adcs x17, x17, x4
+ adcs x12, x12, x14
+ adcs x13, x13, x15
+ adcs x8, x8, x10
+ adcs x9, x9, x11
+ adcs x10, xzr, xzr
+ subs x11, x16, x24
+ sbcs x14, x17, x3
+ sbcs x15, x12, x1
+ sbcs x18, x13, x2
+ sbcs x1, x8, x22
+ sbcs x2, x9, x23
+ sbcs x10, x10, xzr
+ tst x10, #0x1
+ csel x10, x16, x11, ne
+ csel x11, x17, x14, ne
+ csel x12, x12, x15, ne
+ csel x13, x13, x18, ne
+ csel x8, x8, x1, ne
+ csel x9, x9, x2, ne
+ stp x10, x11, [x0, #48]
+ stp x12, x13, [x0, #64]
+ stp x8, x9, [x0, #80]
+ ldp x20, x19, [sp, #48]
+ ldp x22, x21, [sp, #32]
+ ldp x24, x23, [sp, #16]
+ ldp x26, x25, [sp], #64
+ ret
+.Lfunc_end93:
+ .size mcl_fpDbl_add6L, .Lfunc_end93-mcl_fpDbl_add6L
+
+ .globl mcl_fpDbl_sub6L
+ .align 2
+ .type mcl_fpDbl_sub6L,@function
+mcl_fpDbl_sub6L: // @mcl_fpDbl_sub6L
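+// Double-width subtract: computes [x1] - [x2] over 12 limbs; when the high half borrows, the
+// modulus at x3 is added back into it.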
+// BB#0:
+ stp x26, x25, [sp, #-64]!
+ stp x24, x23, [sp, #16]
+ stp x22, x21, [sp, #32]
+ stp x20, x19, [sp, #48]
+ ldp x8, x9, [x2, #80]
+ ldp x10, x11, [x1, #80]
+ ldp x12, x13, [x2, #64]
+ ldp x14, x15, [x1, #64]
+ ldp x16, x17, [x2, #48]
+ ldp x18, x4, [x1, #48]
+ ldp x5, x6, [x2, #32]
+ ldp x7, x19, [x1, #32]
+ ldp x20, x21, [x2, #16]
+ ldp x22, x2, [x2]
+ ldp x24, x25, [x1, #16]
+ ldp x23, x1, [x1]
+ subs x22, x23, x22
+ str x22, [x0]
+ ldp x22, x23, [x3, #32]
+ sbcs x1, x1, x2
+ str x1, [x0, #8]
+ ldp x1, x2, [x3, #16]
+ sbcs x20, x24, x20
+ ldp x24, x3, [x3]
+ str x20, [x0, #16]
+ sbcs x20, x25, x21
+ sbcs x5, x7, x5
+ stp x20, x5, [x0, #24]
+ sbcs x5, x19, x6
+ sbcs x16, x18, x16
+ sbcs x17, x4, x17
+ sbcs x12, x14, x12
+ sbcs x13, x15, x13
+ sbcs x8, x10, x8
+ sbcs x9, x11, x9
+ ngcs x10, xzr
+ tst x10, #0x1
+ csel x10, x23, xzr, ne
+ csel x11, x22, xzr, ne
+ csel x14, x2, xzr, ne
+ csel x15, x1, xzr, ne
+ csel x18, x3, xzr, ne
+ csel x1, x24, xzr, ne
+ adds x16, x1, x16
+ stp x5, x16, [x0, #40]
+ adcs x16, x18, x17
+ adcs x12, x15, x12
+ stp x16, x12, [x0, #56]
+ adcs x12, x14, x13
+ adcs x8, x11, x8
+ stp x12, x8, [x0, #72]
+ adcs x8, x10, x9
+ str x8, [x0, #88]
+ ldp x20, x19, [sp, #48]
+ ldp x22, x21, [sp, #32]
+ ldp x24, x23, [sp, #16]
+ ldp x26, x25, [sp], #64
+ ret
+.Lfunc_end94:
+ .size mcl_fpDbl_sub6L, .Lfunc_end94-mcl_fpDbl_sub6L
+
+ .globl mcl_fp_mulUnitPre7L
+ .align 2
+ .type mcl_fp_mulUnitPre7L,@function
+mcl_fp_mulUnitPre7L: // @mcl_fp_mulUnitPre7L
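+// Multiplies the 7-limb value at x1 by the 64-bit word in x2 and stores the 8-limb product at x0.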
+// BB#0:
+ ldp x10, x8, [x1, #40]
+ ldp x14, x9, [x1, #24]
+ ldp x11, x12, [x1]
+ ldr x13, [x1, #16]
+ mul x15, x11, x2
+ mul x16, x12, x2
+ umulh x11, x11, x2
+ mul x17, x13, x2
+ umulh x12, x12, x2
+ mul x18, x14, x2
+ umulh x13, x13, x2
+ mul x1, x9, x2
+ umulh x14, x14, x2
+ mul x3, x10, x2
+ umulh x9, x9, x2
+ mul x4, x8, x2
+ umulh x10, x10, x2
+ umulh x8, x8, x2
+ adds x11, x11, x16
+ stp x15, x11, [x0]
+ adcs x11, x12, x17
+ str x11, [x0, #16]
+ adcs x11, x13, x18
+ str x11, [x0, #24]
+ adcs x11, x14, x1
+ adcs x9, x9, x3
+ stp x11, x9, [x0, #32]
+ adcs x9, x10, x4
+ adcs x8, x8, xzr
+ stp x9, x8, [x0, #48]
+ ret
+.Lfunc_end95:
+ .size mcl_fp_mulUnitPre7L, .Lfunc_end95-mcl_fp_mulUnitPre7L
+
+ .globl mcl_fpDbl_mulPre7L
+ .align 2
+ .type mcl_fpDbl_mulPre7L,@function
+mcl_fpDbl_mulPre7L: // @mcl_fpDbl_mulPre7L
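+// Full 7x7-limb product: multiplies the values at x1 and x2 and stores the 14-limb result at x0 (no reduction).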
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #624 // =624
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x1, #24]
+ ldp x12, x13, [x1, #40]
+ ldp x14, x15, [x2]
+ ldp x16, x18, [x1, #16]
+ mul x17, x8, x14
+ str x17, [sp, #528] // 8-byte Folded Spill
+ umulh x17, x13, x14
+ str x17, [sp, #616] // 8-byte Folded Spill
+ mul x17, x13, x14
+ str x17, [sp, #608] // 8-byte Folded Spill
+ umulh x17, x12, x14
+ str x17, [sp, #592] // 8-byte Folded Spill
+ mul x17, x12, x14
+ str x17, [sp, #568] // 8-byte Folded Spill
+ umulh x17, x11, x14
+ str x17, [sp, #552] // 8-byte Folded Spill
+ mul x17, x11, x14
+ str x17, [sp, #512] // 8-byte Folded Spill
+ umulh x17, x10, x14
+ str x17, [sp, #496] // 8-byte Folded Spill
+ mul x17, x10, x14
+ str x17, [sp, #456] // 8-byte Folded Spill
+ umulh x17, x16, x14
+ str x17, [sp, #424] // 8-byte Folded Spill
+ mul x17, x16, x14
+ str x17, [sp, #368] // 8-byte Folded Spill
+ umulh x17, x9, x14
+ str x17, [sp, #352] // 8-byte Folded Spill
+ mul x17, x9, x14
+ str x17, [sp, #304] // 8-byte Folded Spill
+ umulh x14, x8, x14
+ str x14, [sp, #272] // 8-byte Folded Spill
+ mul x14, x13, x15
+ str x14, [sp, #560] // 8-byte Folded Spill
+ mul x14, x12, x15
+ str x14, [sp, #520] // 8-byte Folded Spill
+ mul x14, x11, x15
+ str x14, [sp, #488] // 8-byte Folded Spill
+ mul x14, x10, x15
+ str x14, [sp, #448] // 8-byte Folded Spill
+ mul x14, x16, x15
+ umulh x13, x13, x15
+ str x13, [sp, #600] // 8-byte Folded Spill
+ umulh x12, x12, x15
+ str x12, [sp, #576] // 8-byte Folded Spill
+ umulh x11, x11, x15
+ str x11, [sp, #544] // 8-byte Folded Spill
+ umulh x10, x10, x15
+ str x10, [sp, #504] // 8-byte Folded Spill
+ umulh x10, x16, x15
+ str x10, [sp, #472] // 8-byte Folded Spill
+ mul x10, x9, x15
+ str x10, [sp, #208] // 8-byte Folded Spill
+ umulh x9, x9, x15
+ stp x9, x14, [sp, #400]
+ mul x9, x8, x15
+ str x9, [sp, #96] // 8-byte Folded Spill
+ umulh x8, x8, x15
+ str x8, [sp, #320] // 8-byte Folded Spill
+ ldp x9, x11, [x1]
+ ldp x10, x17, [x2, #16]
+ ldp x12, x13, [x1, #16]
+ ldp x14, x16, [x1, #32]
+ ldr x15, [x1, #48]
+ mul x8, x9, x10
+ str x8, [sp, #248] // 8-byte Folded Spill
+ mul x8, x15, x10
+ str x8, [sp, #392] // 8-byte Folded Spill
+ mul x8, x16, x10
+ str x8, [sp, #344] // 8-byte Folded Spill
+ mul x8, x14, x10
+ str x8, [sp, #296] // 8-byte Folded Spill
+ mul x8, x13, x10
+ str x8, [sp, #240] // 8-byte Folded Spill
+ mul x8, x12, x10
+ str x8, [sp, #192] // 8-byte Folded Spill
+ mul x8, x11, x10
+ str x8, [sp, #136] // 8-byte Folded Spill
+ umulh x8, x15, x10
+ str x8, [sp, #440] // 8-byte Folded Spill
+ umulh x8, x16, x10
+ str x8, [sp, #384] // 8-byte Folded Spill
+ umulh x8, x14, x10
+ str x8, [sp, #336] // 8-byte Folded Spill
+ umulh x8, x13, x10
+ str x8, [sp, #288] // 8-byte Folded Spill
+ umulh x8, x12, x10
+ str x8, [sp, #232] // 8-byte Folded Spill
+ umulh x8, x11, x10
+ str x8, [sp, #184] // 8-byte Folded Spill
+ umulh x8, x9, x10
+ str x8, [sp, #128] // 8-byte Folded Spill
+ mul x8, x15, x17
+ str x8, [sp, #464] // 8-byte Folded Spill
+ umulh x8, x15, x17
+ str x8, [sp, #584] // 8-byte Folded Spill
+ mul x8, x16, x17
+ str x8, [sp, #376] // 8-byte Folded Spill
+ umulh x8, x16, x17
+ str x8, [sp, #536] // 8-byte Folded Spill
+ mul x8, x14, x17
+ str x8, [sp, #312] // 8-byte Folded Spill
+ umulh x8, x14, x17
+ str x8, [sp, #480] // 8-byte Folded Spill
+ mul x8, x13, x17
+ str x8, [sp, #224] // 8-byte Folded Spill
+ umulh x8, x13, x17
+ str x8, [sp, #416] // 8-byte Folded Spill
+ mul x8, x12, x17
+ str x8, [sp, #144] // 8-byte Folded Spill
+ umulh x8, x12, x17
+ str x8, [sp, #328] // 8-byte Folded Spill
+ mul x8, x11, x17
+ str x8, [sp, #80] // 8-byte Folded Spill
+ umulh x8, x11, x17
+ str x8, [sp, #264] // 8-byte Folded Spill
+ mul x28, x9, x17
+ umulh x8, x9, x17
+ str x8, [sp, #176] // 8-byte Folded Spill
+ ldp x14, x12, [x1, #24]
+ ldp x10, x9, [x1]
+ ldr x7, [x1, #16]
+ ldp x30, x5, [x1, #40]
+ ldp x27, x8, [x2, #32]
+ ldr x13, [x1, #48]
+ mul x11, x10, x27
+ str x11, [sp, #48] // 8-byte Folded Spill
+ mul x11, x5, x27
+ str x11, [sp, #168] // 8-byte Folded Spill
+ mul x11, x30, x27
+ str x11, [sp, #120] // 8-byte Folded Spill
+ mul x11, x12, x27
+ str x11, [sp, #72] // 8-byte Folded Spill
+ mul x11, x14, x27
+ str x11, [sp, #40] // 8-byte Folded Spill
+ mul x11, x7, x27
+ str x11, [sp, #16] // 8-byte Folded Spill
+ mul x24, x9, x27
+ umulh x11, x5, x27
+ str x11, [sp, #216] // 8-byte Folded Spill
+ umulh x11, x30, x27
+ str x11, [sp, #160] // 8-byte Folded Spill
+ umulh x11, x12, x27
+ str x11, [sp, #112] // 8-byte Folded Spill
+ umulh x11, x14, x27
+ str x11, [sp, #64] // 8-byte Folded Spill
+ umulh x11, x7, x27
+ str x11, [sp, #32] // 8-byte Folded Spill
+ umulh x29, x9, x27
+ umulh x23, x10, x27
+ mul x11, x5, x8
+ str x11, [sp, #256] // 8-byte Folded Spill
+ umulh x11, x5, x8
+ str x11, [sp, #432] // 8-byte Folded Spill
+ mul x11, x30, x8
+ str x11, [sp, #152] // 8-byte Folded Spill
+ umulh x11, x30, x8
+ str x11, [sp, #360] // 8-byte Folded Spill
+ mul x11, x12, x8
+ str x11, [sp, #88] // 8-byte Folded Spill
+ umulh x11, x12, x8
+ str x11, [sp, #280] // 8-byte Folded Spill
+ mul x11, x14, x8
+ str x11, [sp, #24] // 8-byte Folded Spill
+ umulh x11, x14, x8
+ str x11, [sp, #200] // 8-byte Folded Spill
+ mul x25, x7, x8
+ umulh x11, x7, x8
+ str x11, [sp, #104] // 8-byte Folded Spill
+ mul x22, x9, x8
+ umulh x9, x9, x8
+ str x9, [sp, #56] // 8-byte Folded Spill
+ mul x20, x10, x8
+ umulh x26, x10, x8
+ ldr x10, [x2, #48]
+ ldp x2, x8, [x1]
+ ldr x9, [x1, #16]
+ ldp x11, x1, [x1, #32]
+ mul x27, x2, x10
+ umulh x21, x2, x10
+ mul x5, x8, x10
+ umulh x19, x8, x10
+ mul x3, x9, x10
+ umulh x7, x9, x10
+ mul x2, x18, x10
+ umulh x6, x18, x10
+ mul x17, x11, x10
+ umulh x4, x11, x10
+ mul x16, x1, x10
+ umulh x1, x1, x10
+ mul x15, x13, x10
+ umulh x18, x13, x10
+ ldr x8, [sp, #528] // 8-byte Folded Reload
+ str x8, [x0]
+ ldr x8, [sp, #304] // 8-byte Folded Reload
+ ldr x9, [sp, #272] // 8-byte Folded Reload
+ adds x13, x9, x8
+ ldr x8, [sp, #368] // 8-byte Folded Reload
+ ldr x9, [sp, #352] // 8-byte Folded Reload
+ adcs x8, x9, x8
+ ldr x9, [sp, #456] // 8-byte Folded Reload
+ ldr x10, [sp, #424] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ ldr x10, [sp, #512] // 8-byte Folded Reload
+ ldr x11, [sp, #496] // 8-byte Folded Reload
+ adcs x10, x11, x10
+ ldr x11, [sp, #568] // 8-byte Folded Reload
+ ldr x12, [sp, #552] // 8-byte Folded Reload
+ adcs x11, x12, x11
+ ldr x12, [sp, #608] // 8-byte Folded Reload
+ ldr x14, [sp, #592] // 8-byte Folded Reload
+ adcs x12, x14, x12
+ ldr x14, [sp, #616] // 8-byte Folded Reload
+ adcs x14, x14, xzr
+ ldr x30, [sp, #96] // 8-byte Folded Reload
+ adds x13, x30, x13
+ str x13, [x0, #8]
+ ldr x13, [sp, #208] // 8-byte Folded Reload
+ adcs x8, x13, x8
+ ldr x13, [sp, #408] // 8-byte Folded Reload
+ adcs x9, x13, x9
+ ldr x13, [sp, #448] // 8-byte Folded Reload
+ adcs x10, x13, x10
+ ldr x13, [sp, #488] // 8-byte Folded Reload
+ adcs x11, x13, x11
+ ldr x13, [sp, #520] // 8-byte Folded Reload
+ adcs x12, x13, x12
+ ldr x13, [sp, #560] // 8-byte Folded Reload
+ adcs x13, x13, x14
+ adcs x14, xzr, xzr
+ ldr x30, [sp, #320] // 8-byte Folded Reload
+ adds x8, x8, x30
+ ldr x30, [sp, #400] // 8-byte Folded Reload
+ adcs x9, x9, x30
+ ldr x30, [sp, #472] // 8-byte Folded Reload
+ adcs x10, x10, x30
+ ldr x30, [sp, #504] // 8-byte Folded Reload
+ adcs x11, x11, x30
+ ldr x30, [sp, #544] // 8-byte Folded Reload
+ adcs x12, x12, x30
+ ldr x30, [sp, #576] // 8-byte Folded Reload
+ adcs x13, x13, x30
+ ldr x30, [sp, #600] // 8-byte Folded Reload
+ adcs x14, x14, x30
+ ldr x30, [sp, #248] // 8-byte Folded Reload
+ adds x8, x30, x8
+ str x8, [x0, #16]
+ ldp x30, x8, [sp, #128]
+ adcs x8, x8, x9
+ ldr x9, [sp, #192] // 8-byte Folded Reload
+ adcs x9, x9, x10
+ ldr x10, [sp, #240] // 8-byte Folded Reload
+ adcs x10, x10, x11
+ ldr x11, [sp, #296] // 8-byte Folded Reload
+ adcs x11, x11, x12
+ ldr x12, [sp, #344] // 8-byte Folded Reload
+ adcs x12, x12, x13
+ ldr x13, [sp, #392] // 8-byte Folded Reload
+ adcs x13, x13, x14
+ adcs x14, xzr, xzr
+ adds x8, x8, x30
+ ldr x30, [sp, #184] // 8-byte Folded Reload
+ adcs x9, x9, x30
+ ldr x30, [sp, #232] // 8-byte Folded Reload
+ adcs x10, x10, x30
+ ldr x30, [sp, #288] // 8-byte Folded Reload
+ adcs x11, x11, x30
+ ldr x30, [sp, #336] // 8-byte Folded Reload
+ adcs x12, x12, x30
+ ldr x30, [sp, #384] // 8-byte Folded Reload
+ adcs x13, x13, x30
+ ldr x30, [sp, #440] // 8-byte Folded Reload
+ adcs x14, x14, x30
+ adds x8, x28, x8
+ str x8, [x0, #24]
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, x9
+ ldr x9, [sp, #144] // 8-byte Folded Reload
+ adcs x9, x9, x10
+ ldr x10, [sp, #224] // 8-byte Folded Reload
+ adcs x10, x10, x11
+ ldr x11, [sp, #312] // 8-byte Folded Reload
+ adcs x11, x11, x12
+ ldr x12, [sp, #376] // 8-byte Folded Reload
+ adcs x12, x12, x13
+ ldr x13, [sp, #464] // 8-byte Folded Reload
+ adcs x13, x13, x14
+ adcs x14, xzr, xzr
+ ldr x28, [sp, #176] // 8-byte Folded Reload
+ adds x8, x8, x28
+ ldr x28, [sp, #264] // 8-byte Folded Reload
+ adcs x9, x9, x28
+ ldr x28, [sp, #328] // 8-byte Folded Reload
+ adcs x10, x10, x28
+ ldr x28, [sp, #416] // 8-byte Folded Reload
+ adcs x11, x11, x28
+ ldr x28, [sp, #480] // 8-byte Folded Reload
+ adcs x12, x12, x28
+ ldr x28, [sp, #536] // 8-byte Folded Reload
+ adcs x13, x13, x28
+ ldr x28, [sp, #584] // 8-byte Folded Reload
+ adcs x14, x14, x28
+ ldr x28, [sp, #48] // 8-byte Folded Reload
+ adds x8, x28, x8
+ str x8, [x0, #32]
+ adcs x8, x24, x9
+ ldr x9, [sp, #16] // 8-byte Folded Reload
+ adcs x9, x9, x10
+ ldr x10, [sp, #40] // 8-byte Folded Reload
+ adcs x10, x10, x11
+ ldr x11, [sp, #72] // 8-byte Folded Reload
+ adcs x11, x11, x12
+ ldr x12, [sp, #120] // 8-byte Folded Reload
+ adcs x12, x12, x13
+ ldr x13, [sp, #168] // 8-byte Folded Reload
+ adcs x13, x13, x14
+ adcs x14, xzr, xzr
+ adds x8, x8, x23
+ adcs x9, x9, x29
+ ldr x23, [sp, #32] // 8-byte Folded Reload
+ adcs x10, x10, x23
+ ldr x23, [sp, #64] // 8-byte Folded Reload
+ adcs x11, x11, x23
+ ldr x23, [sp, #112] // 8-byte Folded Reload
+ adcs x12, x12, x23
+ ldr x23, [sp, #160] // 8-byte Folded Reload
+ adcs x13, x13, x23
+ ldr x23, [sp, #216] // 8-byte Folded Reload
+ adcs x14, x14, x23
+ adds x8, x20, x8
+ str x8, [x0, #40]
+ adcs x8, x22, x9
+ adcs x9, x25, x10
+ ldr x10, [sp, #24] // 8-byte Folded Reload
+ adcs x10, x10, x11
+ ldr x11, [sp, #88] // 8-byte Folded Reload
+ adcs x11, x11, x12
+ ldr x12, [sp, #152] // 8-byte Folded Reload
+ adcs x12, x12, x13
+ ldr x13, [sp, #256] // 8-byte Folded Reload
+ adcs x13, x13, x14
+ adcs x14, xzr, xzr
+ adds x8, x8, x26
+ ldr x20, [sp, #56] // 8-byte Folded Reload
+ adcs x9, x9, x20
+ ldr x20, [sp, #104] // 8-byte Folded Reload
+ adcs x10, x10, x20
+ ldr x20, [sp, #200] // 8-byte Folded Reload
+ adcs x11, x11, x20
+ ldr x20, [sp, #280] // 8-byte Folded Reload
+ adcs x12, x12, x20
+ ldr x20, [sp, #360] // 8-byte Folded Reload
+ adcs x13, x13, x20
+ ldr x20, [sp, #432] // 8-byte Folded Reload
+ adcs x14, x14, x20
+ adds x8, x27, x8
+ str x8, [x0, #48]
+ adcs x8, x5, x9
+ adcs x9, x3, x10
+ adcs x10, x2, x11
+ adcs x11, x17, x12
+ adcs x12, x16, x13
+ adcs x13, x15, x14
+ adcs x14, xzr, xzr
+ adds x8, x8, x21
+ str x8, [x0, #56]
+ adcs x8, x9, x19
+ str x8, [x0, #64]
+ adcs x8, x10, x7
+ str x8, [x0, #72]
+ adcs x8, x11, x6
+ str x8, [x0, #80]
+ adcs x8, x12, x4
+ str x8, [x0, #88]
+ adcs x8, x13, x1
+ str x8, [x0, #96]
+ adcs x8, x14, x18
+ str x8, [x0, #104]
+ add sp, sp, #624 // =624
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end96:
+ .size mcl_fpDbl_mulPre7L, .Lfunc_end96-mcl_fpDbl_mulPre7L
+
+ .globl mcl_fpDbl_sqrPre7L
+ .align 2
+ .type mcl_fpDbl_sqrPre7L,@function
+mcl_fpDbl_sqrPre7L: // @mcl_fpDbl_sqrPre7L
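+// Full squaring: computes the 14-limb square of the 7-limb value at x1 and stores it at x0.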
+// BB#0:
+ stp x24, x23, [sp, #-48]!
+ stp x22, x21, [sp, #16]
+ stp x20, x19, [sp, #32]
+ ldp x11, x8, [x1]
+ ldp x9, x10, [x1, #40]
+ ldp x15, x12, [x1, #16]
+ ldp x16, x3, [x1, #16]
+ ldp x13, x14, [x1, #32]
+ ldp x18, x17, [x1, #32]
+ ldr x2, [x1, #32]
+ mul x4, x11, x11
+ umulh x5, x10, x11
+ mul x6, x9, x11
+ mul x7, x18, x11
+ mul x19, x3, x11
+ umulh x20, x16, x11
+ mul x21, x16, x11
+ umulh x22, x8, x11
+ mul x23, x8, x11
+ str x4, [x0]
+ umulh x4, x11, x11
+ adds x4, x4, x23
+ adcs x21, x22, x21
+ adcs x19, x20, x19
+ umulh x20, x3, x11
+ adcs x7, x20, x7
+ umulh x20, x18, x11
+ adcs x6, x20, x6
+ mul x20, x10, x11
+ umulh x11, x9, x11
+ adcs x20, x11, x20
+ adcs x5, x5, xzr
+ adds x4, x23, x4
+ ldp x11, x23, [x1, #40]
+ str x4, [x0, #8]
+ mul x4, x8, x8
+ adcs x4, x4, x21
+ mul x21, x16, x8
+ adcs x19, x21, x19
+ mul x21, x3, x8
+ adcs x7, x21, x7
+ mul x21, x18, x8
+ adcs x6, x21, x6
+ mul x21, x9, x8
+ adcs x20, x21, x20
+ mul x21, x10, x8
+ umulh x10, x10, x8
+ umulh x9, x9, x8
+ umulh x18, x18, x8
+ umulh x3, x3, x8
+ umulh x16, x16, x8
+ umulh x8, x8, x8
+ adcs x5, x21, x5
+ adcs x21, xzr, xzr
+ adds x4, x4, x22
+ adcs x8, x19, x8
+ ldp x19, x22, [x1]
+ adcs x16, x7, x16
+ adcs x3, x6, x3
+ ldp x6, x7, [x1, #8]
+ adcs x18, x20, x18
+ mul x20, x19, x15
+ adcs x9, x5, x9
+ mul x5, x23, x15
+ adcs x10, x21, x10
+ mul x21, x14, x15
+ adds x4, x20, x4
+ mul x20, x13, x15
+ str x4, [x0, #16]
+ mul x4, x6, x15
+ adcs x8, x4, x8
+ mul x4, x15, x15
+ adcs x16, x4, x16
+ mul x4, x12, x15
+ adcs x3, x4, x3
+ adcs x18, x20, x18
+ umulh x20, x13, x15
+ adcs x9, x21, x9
+ umulh x21, x19, x15
+ adcs x10, x5, x10
+ adcs x5, xzr, xzr
+ adds x8, x8, x21
+ umulh x21, x6, x15
+ adcs x16, x16, x21
+ umulh x21, x15, x15
+ adcs x3, x3, x21
+ umulh x21, x12, x15
+ adcs x18, x18, x21
+ adcs x9, x9, x20
+ umulh x20, x14, x15
+ adcs x10, x10, x20
+ umulh x15, x23, x15
+ adcs x15, x5, x15
+ mul x5, x19, x12
+ adds x8, x5, x8
+ ldr x5, [x1, #32]
+ str x8, [x0, #24]
+ mul x8, x6, x12
+ adcs x8, x8, x16
+ ldr x16, [x1]
+ adcs x3, x4, x3
+ mul x4, x12, x12
+ adcs x18, x4, x18
+ mul x4, x13, x12
+ adcs x9, x4, x9
+ mul x4, x14, x12
+ adcs x10, x4, x10
+ mul x4, x23, x12
+ umulh x19, x19, x12
+ adcs x15, x4, x15
+ adcs x4, xzr, xzr
+ adds x8, x8, x19
+ ldr x19, [x1, #24]
+ umulh x6, x6, x12
+ adcs x3, x3, x6
+ ldr x6, [x1, #48]
+ adcs x18, x18, x21
+ ldr x20, [x1, #48]
+ umulh x21, x23, x12
+ umulh x14, x14, x12
+ umulh x13, x13, x12
+ umulh x12, x12, x12
+ adcs x9, x9, x12
+ adcs x10, x10, x13
+ ldp x12, x13, [x1]
+ adcs x14, x15, x14
+ mul x15, x16, x5
+ adcs x4, x4, x21
+ mul x21, x6, x5
+ adds x8, x15, x8
+ mul x15, x17, x5
+ str x8, [x0, #32]
+ mul x8, x22, x5
+ adcs x8, x8, x3
+ mul x3, x7, x5
+ adcs x18, x3, x18
+ mul x3, x19, x5
+ adcs x9, x3, x9
+ mul x3, x5, x5
+ adcs x10, x3, x10
+ umulh x3, x16, x5
+ adcs x14, x15, x14
+ adcs x4, x21, x4
+ adcs x21, xzr, xzr
+ adds x8, x8, x3
+ umulh x3, x22, x5
+ adcs x18, x18, x3
+ umulh x3, x7, x5
+ adcs x9, x9, x3
+ umulh x3, x19, x5
+ adcs x10, x10, x3
+ umulh x3, x5, x5
+ adcs x14, x14, x3
+ umulh x3, x6, x5
+ umulh x5, x17, x5
+ adcs x4, x4, x5
+ adcs x3, x21, x3
+ mul x21, x16, x17
+ adds x8, x21, x8
+ ldp x21, x1, [x1, #16]
+ str x8, [x0, #40]
+ mul x8, x22, x17
+ adcs x8, x8, x18
+ mul x18, x7, x17
+ adcs x9, x18, x9
+ mul x18, x19, x17
+ adcs x10, x18, x10
+ mul x18, x6, x17
+ adcs x14, x15, x14
+ mul x15, x17, x17
+ umulh x6, x6, x17
+ umulh x19, x19, x17
+ umulh x7, x7, x17
+ umulh x22, x22, x17
+ umulh x16, x16, x17
+ umulh x17, x17, x17
+ adcs x15, x15, x4
+ mul x4, x12, x20
+ adcs x18, x18, x3
+ adcs x3, xzr, xzr
+ adds x8, x8, x16
+ mul x16, x11, x20
+ adcs x9, x9, x22
+ mul x22, x2, x20
+ adcs x10, x10, x7
+ mul x7, x1, x20
+ adcs x14, x14, x19
+ mul x19, x21, x20
+ adcs x15, x15, x5
+ mul x5, x13, x20
+ adcs x17, x18, x17
+ mul x18, x20, x20
+ umulh x12, x12, x20
+ umulh x13, x13, x20
+ umulh x21, x21, x20
+ umulh x1, x1, x20
+ umulh x2, x2, x20
+ umulh x11, x11, x20
+ umulh x20, x20, x20
+ adcs x3, x3, x6
+ adds x8, x4, x8
+ str x8, [x0, #48]
+ adcs x8, x5, x9
+ adcs x9, x19, x10
+ adcs x10, x7, x14
+ adcs x14, x22, x15
+ adcs x15, x16, x17
+ adcs x16, x18, x3
+ adcs x17, xzr, xzr
+ adds x8, x8, x12
+ str x8, [x0, #56]
+ adcs x8, x9, x13
+ str x8, [x0, #64]
+ adcs x8, x10, x21
+ str x8, [x0, #72]
+ adcs x8, x14, x1
+ str x8, [x0, #80]
+ adcs x8, x15, x2
+ str x8, [x0, #88]
+ adcs x8, x16, x11
+ str x8, [x0, #96]
+ adcs x8, x17, x20
+ str x8, [x0, #104]
+ ldp x20, x19, [sp, #32]
+ ldp x22, x21, [sp, #16]
+ ldp x24, x23, [sp], #48
+ ret
+.Lfunc_end97:
+ .size mcl_fpDbl_sqrPre7L, .Lfunc_end97-mcl_fpDbl_sqrPre7L
+
+ .globl mcl_fp_mont7L
+ .align 2
+ .type mcl_fp_mont7L,@function
+mcl_fp_mont7L: // @mcl_fp_mont7L
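+// Montgomery multiplication, 7 limbs: word-by-word product of [x1] and [x2] with interleaved
+// reduction by the modulus at x3 (n' at [x3, #-8]) and a final conditional subtraction.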
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #144 // =144
+ str x2, [sp, #112] // 8-byte Folded Spill
+ str x0, [sp, #64] // 8-byte Folded Spill
+ ldr x6, [x2]
+ ldr x15, [x1, #48]
+ str x15, [sp, #96] // 8-byte Folded Spill
+ ldr x0, [x1, #32]
+ str x0, [sp, #56] // 8-byte Folded Spill
+ ldr x18, [x1, #40]
+ ldp x11, x13, [x1, #16]
+ ldp x17, x5, [x1]
+ str x5, [sp, #88] // 8-byte Folded Spill
+ ldur x12, [x3, #-8]
+ str x12, [sp, #128] // 8-byte Folded Spill
+ ldr x1, [x3, #32]
+ str x1, [sp, #104] // 8-byte Folded Spill
+ ldr x9, [x3, #40]
+ str x9, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [x3, #16]
+ str x8, [sp, #136] // 8-byte Folded Spill
+ ldr x10, [x3, #24]
+ str x10, [sp, #120] // 8-byte Folded Spill
+ ldr x14, [x3]
+ str x14, [sp, #24] // 8-byte Folded Spill
+ ldr x4, [x3, #8]
+ str x4, [sp, #72] // 8-byte Folded Spill
+ ldr x7, [x2, #8]
+ umulh x19, x15, x6
+ mul x20, x15, x6
+ umulh x21, x18, x6
+ mul x22, x18, x6
+ mov x15, x0
+ umulh x23, x15, x6
+ mul x24, x15, x6
+ mov x16, x13
+ umulh x25, x16, x6
+ mul x26, x16, x6
+ mov x13, x11
+ umulh x27, x13, x6
+ mul x28, x13, x6
+ mul x29, x5, x6
+ mov x11, x17
+ umulh x30, x11, x6
+ adds x29, x30, x29
+ umulh x30, x5, x6
+ mul x6, x11, x6
+ adcs x28, x30, x28
+ mul x30, x6, x12
+ adcs x26, x27, x26
+ mul x27, x30, x10
+ adcs x24, x25, x24
+ mul x25, x30, x8
+ adcs x22, x23, x22
+ mul x23, x30, x4
+ adcs x20, x21, x20
+ umulh x21, x30, x14
+ adcs x19, x19, xzr
+ adds x21, x21, x23
+ umulh x23, x30, x4
+ adcs x23, x23, x25
+ umulh x25, x30, x8
+ adcs x25, x25, x27
+ mul x27, x30, x1
+ umulh x17, x30, x10
+ adcs x17, x17, x27
+ ldr x3, [x3, #48]
+ str x3, [sp, #48] // 8-byte Folded Spill
+ mul x27, x30, x9
+ umulh x0, x30, x1
+ adcs x0, x0, x27
+ mul x27, x30, x3
+ umulh x2, x30, x9
+ adcs x2, x2, x27
+ umulh x27, x30, x3
+ mul x30, x30, x14
+ adcs x27, x27, xzr
+ cmn x30, x6
+ adcs x6, x21, x29
+ adcs x21, x23, x28
+ mul x23, x7, x15
+ adcs x25, x25, x26
+ mul x26, x7, x16
+ adcs x17, x17, x24
+ mul x24, x7, x13
+ adcs x0, x0, x22
+ mul x22, x7, x5
+ adcs x2, x2, x20
+ umulh x20, x7, x11
+ adcs x19, x27, x19
+ adcs x27, xzr, xzr
+ adds x20, x20, x22
+ umulh x22, x7, x5
+ adcs x22, x22, x24
+ umulh x24, x7, x13
+ mov x5, x13
+ adcs x24, x24, x26
+ umulh x26, x7, x16
+ adcs x23, x26, x23
+ mul x26, x7, x18
+ umulh x28, x7, x15
+ adcs x26, x28, x26
+ ldr x15, [sp, #96] // 8-byte Folded Reload
+ mul x28, x7, x15
+ umulh x29, x7, x18
+ adcs x28, x29, x28
+ umulh x29, x7, x15
+ mul x7, x7, x11
+ adcs x29, x29, xzr
+ adds x30, x6, x7
+ adcs x6, x21, x20
+ adcs x25, x25, x22
+ mul x22, x30, x12
+ adcs x24, x17, x24
+ mul x17, x22, x10
+ adcs x0, x0, x23
+ mul x23, x22, x8
+ adcs x7, x2, x26
+ mul x2, x22, x4
+ adcs x20, x19, x28
+ umulh x26, x22, x14
+ adcs x21, x27, x29
+ adcs x19, xzr, xzr
+ adds x2, x26, x2
+ umulh x26, x22, x4
+ adcs x23, x26, x23
+ umulh x26, x22, x8
+ adcs x17, x26, x17
+ mul x26, x22, x1
+ umulh x27, x22, x10
+ adcs x26, x27, x26
+ mul x27, x22, x9
+ umulh x28, x22, x1
+ adcs x27, x28, x27
+ mul x28, x22, x3
+ umulh x29, x22, x9
+ adcs x28, x29, x28
+ umulh x29, x22, x3
+ mul x22, x22, x14
+ mov x10, x14
+ adcs x29, x29, xzr
+ cmn x22, x30
+ adcs x22, x2, x6
+ adcs x23, x23, x25
+ ldr x8, [sp, #112] // 8-byte Folded Reload
+ adcs x24, x17, x24
+ ldp x25, x17, [x8, #16]
+ adcs x0, x26, x0
+ mul x2, x25, x16
+ adcs x6, x27, x7
+ mul x7, x25, x5
+ adcs x20, x28, x20
+ ldp x15, x8, [sp, #88]
+ mul x26, x25, x15
+ adcs x21, x29, x21
+ mov x12, x11
+ umulh x27, x25, x12
+ adcs x19, x19, xzr
+ adds x26, x27, x26
+ umulh x27, x25, x15
+ adcs x7, x27, x7
+ umulh x27, x25, x5
+ mov x9, x5
+ adcs x2, x27, x2
+ ldr x11, [sp, #56] // 8-byte Folded Reload
+ mul x27, x25, x11
+ umulh x28, x25, x16
+ mov x13, x16
+ adcs x27, x28, x27
+ mul x28, x25, x18
+ umulh x29, x25, x11
+ adcs x28, x29, x28
+ mul x29, x25, x8
+ umulh x30, x25, x18
+ adcs x29, x30, x29
+ umulh x30, x25, x8
+ mov x14, x8
+ mul x25, x25, x12
+ mov x5, x12
+ adcs x30, x30, xzr
+ adds x22, x22, x25
+ adcs x23, x23, x26
+ adcs x7, x24, x7
+ adcs x0, x0, x2
+ ldp x8, x12, [sp, #128]
+ mul x2, x22, x8
+ adcs x6, x6, x27
+ mul x24, x2, x12
+ adcs x20, x20, x28
+ mul x25, x2, x4
+ adcs x21, x21, x29
+ mov x1, x10
+ umulh x26, x2, x1
+ adcs x19, x19, x30
+ adcs x27, xzr, xzr
+ adds x25, x26, x25
+ umulh x26, x2, x4
+ adcs x24, x26, x24
+ ldr x10, [sp, #120] // 8-byte Folded Reload
+ mul x26, x2, x10
+ umulh x28, x2, x12
+ adcs x26, x28, x26
+ ldr x12, [sp, #104] // 8-byte Folded Reload
+ mul x28, x2, x12
+ umulh x29, x2, x10
+ adcs x28, x29, x28
+ ldr x10, [sp, #80] // 8-byte Folded Reload
+ mul x29, x2, x10
+ umulh x30, x2, x12
+ adcs x29, x30, x29
+ mul x30, x2, x3
+ umulh x12, x2, x10
+ adcs x12, x12, x30
+ umulh x30, x2, x3
+ mul x2, x2, x1
+ adcs x30, x30, xzr
+ cmn x2, x22
+ adcs x2, x25, x23
+ adcs x7, x24, x7
+ adcs x0, x26, x0
+ mul x22, x17, x11
+ adcs x6, x28, x6
+ mul x23, x17, x13
+ adcs x20, x29, x20
+ mul x24, x17, x9
+ adcs x12, x12, x21
+ mul x21, x17, x15
+ adcs x19, x30, x19
+ umulh x25, x17, x5
+ adcs x26, x27, xzr
+ adds x21, x25, x21
+ umulh x25, x17, x15
+ adcs x24, x25, x24
+ umulh x25, x17, x9
+ mov x16, x9
+ adcs x23, x25, x23
+ umulh x25, x17, x13
+ adcs x22, x25, x22
+ mul x25, x17, x18
+ umulh x27, x17, x11
+ adcs x25, x27, x25
+ mov x9, x14
+ mul x27, x17, x9
+ umulh x28, x17, x18
+ adcs x27, x28, x27
+ umulh x28, x17, x9
+ mul x17, x17, x5
+ mov x15, x5
+ adcs x28, x28, xzr
+ adds x17, x2, x17
+ adcs x2, x7, x21
+ adcs x0, x0, x24
+ mul x24, x17, x8
+ adcs x29, x6, x23
+ ldr x9, [sp, #120] // 8-byte Folded Reload
+ mul x23, x24, x9
+ adcs x6, x20, x22
+ ldr x8, [sp, #136] // 8-byte Folded Reload
+ mul x22, x24, x8
+ adcs x7, x12, x25
+ mul x12, x24, x4
+ adcs x20, x19, x27
+ umulh x25, x24, x1
+ adcs x21, x26, x28
+ adcs x19, xzr, xzr
+ adds x12, x25, x12
+ umulh x25, x24, x4
+ adcs x25, x25, x22
+ umulh x22, x24, x8
+ adcs x26, x22, x23
+ ldr x5, [sp, #104] // 8-byte Folded Reload
+ mul x22, x24, x5
+ umulh x23, x24, x9
+ adcs x27, x23, x22
+ mov x9, x10
+ mul x22, x24, x9
+ umulh x23, x24, x5
+ adcs x28, x23, x22
+ mul x22, x24, x3
+ umulh x23, x24, x9
+ adcs x30, x23, x22
+ umulh x22, x24, x3
+ mul x23, x24, x1
+ mov x3, x1
+ adcs x24, x22, xzr
+ cmn x23, x17
+ adcs x22, x12, x2
+ adcs x23, x25, x0
+ ldr x10, [sp, #112] // 8-byte Folded Reload
+ ldp x12, x0, [x10, #32]
+ adcs x17, x26, x29
+ adcs x2, x27, x6
+ mul x6, x12, x13
+ adcs x7, x28, x7
+ mov x10, x16
+ mul x25, x12, x10
+ adcs x20, x30, x20
+ ldr x16, [sp, #88] // 8-byte Folded Reload
+ mul x26, x12, x16
+ adcs x21, x24, x21
+ umulh x24, x12, x15
+ adcs x1, x19, xzr
+ adds x24, x24, x26
+ umulh x26, x12, x16
+ adcs x25, x26, x25
+ umulh x26, x12, x10
+ adcs x6, x26, x6
+ mul x26, x12, x11
+ umulh x27, x12, x13
+ adcs x26, x27, x26
+ mul x27, x12, x18
+ umulh x28, x12, x11
+ adcs x27, x28, x27
+ mul x28, x12, x14
+ umulh x29, x12, x18
+ adcs x28, x29, x28
+ umulh x29, x12, x14
+ mul x12, x12, x15
+ adcs x29, x29, xzr
+ adds x12, x22, x12
+ adcs x22, x23, x24
+ adcs x17, x17, x25
+ adcs x2, x2, x6
+ ldr x19, [sp, #128] // 8-byte Folded Reload
+ mul x6, x12, x19
+ adcs x7, x7, x26
+ mov x30, x8
+ mul x23, x6, x30
+ adcs x20, x20, x27
+ mul x24, x6, x4
+ adcs x21, x21, x28
+ mov x8, x3
+ umulh x25, x6, x8
+ adcs x1, x1, x29
+ adcs x26, xzr, xzr
+ adds x24, x25, x24
+ umulh x25, x6, x4
+ adcs x23, x25, x23
+ ldr x4, [sp, #120] // 8-byte Folded Reload
+ mul x25, x6, x4
+ umulh x27, x6, x30
+ adcs x25, x27, x25
+ mul x27, x6, x5
+ umulh x28, x6, x4
+ adcs x27, x28, x27
+ mov x3, x9
+ mul x28, x6, x3
+ umulh x29, x6, x5
+ adcs x28, x29, x28
+ ldr x9, [sp, #48] // 8-byte Folded Reload
+ mul x29, x6, x9
+ umulh x30, x6, x3
+ adcs x29, x30, x29
+ umulh x30, x6, x9
+ mov x3, x9
+ mul x6, x6, x8
+ mov x5, x8
+ adcs x30, x30, xzr
+ cmn x6, x12
+ adcs x12, x24, x22
+ adcs x17, x23, x17
+ adcs x2, x25, x2
+ mul x6, x0, x11
+ adcs x7, x27, x7
+ mul x22, x0, x13
+ adcs x20, x28, x20
+ mul x23, x0, x10
+ adcs x21, x29, x21
+ mul x24, x0, x16
+ adcs x29, x30, x1
+ mov x1, x15
+ umulh x25, x0, x1
+ adcs x26, x26, xzr
+ adds x24, x25, x24
+ umulh x25, x0, x16
+ adcs x23, x25, x23
+ umulh x25, x0, x10
+ adcs x22, x25, x22
+ umulh x25, x0, x13
+ adcs x6, x25, x6
+ mul x25, x0, x18
+ umulh x27, x0, x11
+ adcs x25, x27, x25
+ mov x9, x14
+ mul x27, x0, x9
+ umulh x28, x0, x18
+ adcs x27, x28, x27
+ umulh x28, x0, x9
+ mul x0, x0, x1
+ adcs x28, x28, xzr
+ adds x12, x12, x0
+ adcs x8, x17, x24
+ str x8, [sp, #40] // 8-byte Folded Spill
+ adcs x8, x2, x23
+ str x8, [sp, #32] // 8-byte Folded Spill
+ mul x2, x12, x19
+ adcs x7, x7, x22
+ mul x22, x2, x4
+ adcs x8, x20, x6
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x8, [sp, #136] // 8-byte Folded Reload
+ mul x20, x2, x8
+ adcs x21, x21, x25
+ ldr x9, [sp, #72] // 8-byte Folded Reload
+ mul x23, x2, x9
+ adcs x19, x29, x27
+ mov x15, x5
+ umulh x24, x2, x15
+ adcs x17, x26, x28
+ str x17, [sp, #8] // 8-byte Folded Spill
+ adcs x26, xzr, xzr
+ adds x23, x24, x23
+ umulh x24, x2, x9
+ adcs x20, x24, x20
+ umulh x24, x2, x8
+ adcs x22, x24, x22
+ ldp x25, x8, [sp, #104]
+ mul x24, x2, x25
+ umulh x27, x2, x4
+ adcs x6, x27, x24
+ ldr x5, [sp, #80] // 8-byte Folded Reload
+ mul x27, x2, x5
+ umulh x28, x2, x25
+ adcs x27, x28, x27
+ mul x28, x2, x3
+ umulh x29, x2, x5
+ adcs x28, x29, x28
+ ldr x29, [x8, #48]
+ mul x30, x2, x15
+ umulh x2, x2, x3
+ adcs x2, x2, xzr
+ cmn x30, x12
+ umulh x24, x29, x14
+ mul x30, x29, x14
+ umulh x0, x29, x18
+ mul x18, x29, x18
+ umulh x17, x29, x11
+ mul x15, x29, x11
+ umulh x14, x29, x13
+ mul x13, x29, x13
+ umulh x12, x29, x10
+ mul x11, x29, x10
+ mul x10, x29, x16
+ umulh x9, x29, x16
+ umulh x8, x29, x1
+ mul x29, x29, x1
+ ldr x16, [sp, #40] // 8-byte Folded Reload
+ adcs x23, x23, x16
+ ldr x16, [sp, #32] // 8-byte Folded Reload
+ adcs x20, x20, x16
+ adcs x7, x22, x7
+ ldr x16, [sp, #16] // 8-byte Folded Reload
+ adcs x6, x6, x16
+ adcs x21, x27, x21
+ adcs x19, x28, x19
+ ldr x16, [sp, #8] // 8-byte Folded Reload
+ adcs x2, x2, x16
+ adcs x22, x26, xzr
+ adds x8, x8, x10
+ adcs x9, x9, x11
+ adcs x10, x12, x13
+ adcs x11, x14, x15
+ adcs x12, x17, x18
+ adcs x13, x0, x30
+ adcs x14, x24, xzr
+ adds x15, x23, x29
+ adcs x8, x20, x8
+ ldr x16, [sp, #128] // 8-byte Folded Reload
+ mul x16, x15, x16
+ adcs x9, x7, x9
+ mul x17, x16, x3
+ mul x18, x16, x5
+ mul x0, x16, x25
+ adcs x10, x6, x10
+ mul x6, x16, x4
+ adcs x11, x21, x11
+ ldr x21, [sp, #136] // 8-byte Folded Reload
+ mul x7, x16, x21
+ adcs x12, x19, x12
+ ldr x23, [sp, #72] // 8-byte Folded Reload
+ mul x19, x16, x23
+ adcs x13, x2, x13
+ ldr x24, [sp, #24] // 8-byte Folded Reload
+ umulh x2, x16, x24
+ adcs x14, x22, x14
+ adcs x20, xzr, xzr
+ adds x2, x2, x19
+ umulh x19, x16, x23
+ adcs x7, x19, x7
+ umulh x19, x16, x21
+ adcs x6, x19, x6
+ umulh x19, x16, x4
+ adcs x0, x19, x0
+ umulh x19, x16, x25
+ adcs x18, x19, x18
+ umulh x19, x16, x5
+ adcs x17, x19, x17
+ umulh x19, x16, x3
+ mul x16, x16, x24
+ adcs x19, x19, xzr
+ cmn x16, x15
+ adcs x8, x2, x8
+ adcs x9, x7, x9
+ adcs x10, x6, x10
+ adcs x11, x0, x11
+ adcs x12, x18, x12
+ adcs x13, x17, x13
+ adcs x14, x19, x14
+ adcs x15, x20, xzr
+ subs x16, x8, x24
+ sbcs x17, x9, x23
+ sbcs x18, x10, x21
+ sbcs x0, x11, x4
+ sbcs x1, x12, x25
+ sbcs x2, x13, x5
+ sbcs x3, x14, x3
+ sbcs x15, x15, xzr
+ tst x15, #0x1
+ csel x8, x8, x16, ne
+ csel x9, x9, x17, ne
+ csel x10, x10, x18, ne
+ csel x11, x11, x0, ne
+ csel x12, x12, x1, ne
+ csel x13, x13, x2, ne
+ csel x14, x14, x3, ne
+ ldr x15, [sp, #64] // 8-byte Folded Reload
+ stp x8, x9, [x15]
+ stp x10, x11, [x15, #16]
+ stp x12, x13, [x15, #32]
+ str x14, [x15, #48]
+ add sp, sp, #144 // =144
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end98:
+ .size mcl_fp_mont7L, .Lfunc_end98-mcl_fp_mont7L
+
+ .globl mcl_fp_montNF7L
+ .align 2
+ .type mcl_fp_montNF7L,@function
+mcl_fp_montNF7L: // @mcl_fp_montNF7L
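+// Montgomery multiplication, 7 limbs (NF variant): interleaved reduction as above, but the final
+// correction uses a signed compare and csel instead of a carry test.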
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ sub sp, sp, #32 // =32
+ stp x0, x2, [sp, #8]
+ ldr x7, [x2]
+ ldp x5, x16, [x1, #40]
+ ldp x6, x17, [x1, #24]
+ ldr x4, [x1]
+ ldp x1, x18, [x1, #8]
+ ldur x8, [x3, #-8]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldp x15, x0, [x3, #40]
+ ldp x11, x10, [x3, #24]
+ ldp x13, x12, [x3, #8]
+ ldr x14, [x3]
+ ldr x25, [x2, #8]
+ umulh x3, x16, x7
+ mul x19, x16, x7
+ umulh x20, x5, x7
+ mul x21, x5, x7
+ umulh x22, x17, x7
+ mul x23, x17, x7
+ umulh x24, x6, x7
+ mul x26, x6, x7
+ umulh x27, x18, x7
+ mul x28, x18, x7
+ mul x29, x1, x7
+ umulh x30, x4, x7
+ adds x29, x30, x29
+ umulh x30, x1, x7
+ mul x7, x4, x7
+ adcs x28, x30, x28
+ mul x30, x25, x5
+ adcs x26, x27, x26
+ mul x27, x25, x17
+ adcs x23, x24, x23
+ mul x24, x25, x6
+ adcs x21, x22, x21
+ mul x22, x7, x8
+ adcs x19, x20, x19
+ mul x20, x22, x14
+ adcs x3, x3, xzr
+ cmn x20, x7
+ mul x9, x25, x18
+ mul x7, x22, x13
+ adcs x7, x7, x29
+ mul x20, x22, x12
+ adcs x20, x20, x28
+ mul x28, x22, x11
+ adcs x26, x28, x26
+ mul x28, x22, x10
+ adcs x23, x28, x23
+ mul x28, x22, x15
+ adcs x21, x28, x21
+ mul x28, x22, x0
+ adcs x19, x28, x19
+ umulh x28, x22, x14
+ adcs x29, x3, xzr
+ adds x28, x7, x28
+ umulh x3, x22, x13
+ adcs x8, x20, x3
+ umulh x3, x22, x12
+ adcs x26, x26, x3
+ umulh x3, x22, x11
+ adcs x3, x23, x3
+ umulh x7, x22, x10
+ adcs x7, x21, x7
+ umulh x20, x22, x15
+ adcs x19, x19, x20
+ mul x21, x25, x1
+ umulh x20, x22, x0
+ adcs x20, x29, x20
+ umulh x22, x25, x4
+ adds x29, x22, x21
+ umulh x21, x25, x1
+ adcs x23, x21, x9
+ umulh x9, x25, x18
+ adcs x21, x9, x24
+ umulh x9, x25, x6
+ adcs x22, x9, x27
+ umulh x9, x25, x17
+ adcs x30, x9, x30
+ mul x9, x25, x16
+ umulh x24, x25, x5
+ adcs x24, x24, x9
+ umulh x9, x25, x16
+ mul x25, x25, x4
+ adcs x9, x9, xzr
+ adds x27, x25, x28
+ adcs x25, x29, x8
+ ldp x28, x8, [x2, #16]
+ adcs x29, x23, x26
+ adcs x3, x21, x3
+ mul x21, x28, x17
+ adcs x7, x22, x7
+ mul x22, x28, x6
+ adcs x19, x30, x19
+ ldr x2, [sp, #24] // 8-byte Folded Reload
+ mul x23, x27, x2
+ adcs x20, x24, x20
+ mul x24, x23, x14
+ adcs x9, x9, xzr
+ cmn x24, x27
+ mul x24, x28, x18
+ mul x26, x23, x13
+ adcs x25, x26, x25
+ mul x26, x23, x12
+ adcs x26, x26, x29
+ mul x27, x23, x11
+ adcs x3, x27, x3
+ mul x27, x23, x10
+ adcs x7, x27, x7
+ mul x27, x23, x15
+ adcs x19, x27, x19
+ mul x27, x23, x0
+ adcs x20, x27, x20
+ umulh x27, x23, x14
+ adcs x9, x9, xzr
+ adds x25, x25, x27
+ umulh x27, x23, x13
+ adcs x26, x26, x27
+ umulh x27, x23, x12
+ adcs x3, x3, x27
+ umulh x27, x23, x11
+ adcs x7, x7, x27
+ umulh x27, x23, x10
+ adcs x19, x19, x27
+ umulh x27, x23, x15
+ adcs x20, x20, x27
+ mul x27, x28, x1
+ umulh x23, x23, x0
+ adcs x9, x9, x23
+ umulh x23, x28, x4
+ adds x23, x23, x27
+ umulh x27, x28, x1
+ adcs x24, x27, x24
+ umulh x27, x28, x18
+ adcs x22, x27, x22
+ umulh x27, x28, x6
+ adcs x21, x27, x21
+ mul x27, x28, x5
+ umulh x29, x28, x17
+ adcs x27, x29, x27
+ mul x29, x28, x16
+ umulh x30, x28, x5
+ adcs x29, x30, x29
+ umulh x30, x28, x16
+ mul x28, x28, x4
+ adcs x30, x30, xzr
+ adds x25, x28, x25
+ adcs x23, x23, x26
+ adcs x3, x24, x3
+ mul x26, x8, x5
+ adcs x7, x22, x7
+ mul x22, x8, x17
+ adcs x19, x21, x19
+ mul x24, x8, x6
+ adcs x20, x27, x20
+ mul x21, x25, x2
+ adcs x9, x29, x9
+ mul x27, x21, x14
+ adcs x28, x30, xzr
+ cmn x27, x25
+ mul x25, x8, x18
+ mul x27, x21, x13
+ adcs x23, x27, x23
+ mul x27, x21, x12
+ adcs x3, x27, x3
+ mul x27, x21, x11
+ adcs x7, x27, x7
+ mul x27, x21, x10
+ adcs x19, x27, x19
+ mul x27, x21, x15
+ adcs x20, x27, x20
+ mul x27, x21, x0
+ adcs x9, x27, x9
+ umulh x27, x21, x14
+ adcs x28, x28, xzr
+ adds x27, x23, x27
+ umulh x23, x21, x13
+ adcs x3, x3, x23
+ umulh x23, x21, x12
+ adcs x30, x7, x23
+ umulh x7, x21, x11
+ adcs x7, x19, x7
+ umulh x19, x21, x10
+ adcs x19, x20, x19
+ umulh x20, x21, x15
+ adcs x20, x9, x20
+ mul x9, x8, x1
+ umulh x21, x21, x0
+ adcs x21, x28, x21
+ umulh x23, x8, x4
+ adds x9, x23, x9
+ umulh x23, x8, x1
+ adcs x28, x23, x25
+ umulh x23, x8, x18
+ adcs x23, x23, x24
+ umulh x24, x8, x6
+ adcs x24, x24, x22
+ umulh x22, x8, x17
+ adcs x25, x22, x26
+ mul x22, x8, x16
+ umulh x26, x8, x5
+ adcs x26, x26, x22
+ umulh x22, x8, x16
+ mul x29, x8, x4
+ adcs x2, x22, xzr
+ adds x29, x29, x27
+ adcs x27, x9, x3
+ ldr x8, [sp, #16] // 8-byte Folded Reload
+ ldp x22, x3, [x8, #32]
+ adcs x9, x28, x30
+ adcs x7, x23, x7
+ mul x23, x22, x17
+ adcs x19, x24, x19
+ mul x24, x22, x6
+ adcs x20, x25, x20
+ ldr x8, [sp, #24] // 8-byte Folded Reload
+ mul x25, x29, x8
+ adcs x21, x26, x21
+ mul x26, x25, x14
+ adcs x2, x2, xzr
+ cmn x26, x29
+ mul x26, x22, x18
+ mul x28, x25, x13
+ adcs x27, x28, x27
+ mul x28, x25, x12
+ adcs x9, x28, x9
+ mul x28, x25, x11
+ adcs x7, x28, x7
+ mul x28, x25, x10
+ adcs x19, x28, x19
+ mul x28, x25, x15
+ adcs x20, x28, x20
+ mul x28, x25, x0
+ adcs x21, x28, x21
+ umulh x28, x25, x14
+ adcs x2, x2, xzr
+ adds x27, x27, x28
+ umulh x28, x25, x13
+ adcs x9, x9, x28
+ umulh x28, x25, x12
+ adcs x7, x7, x28
+ umulh x28, x25, x11
+ adcs x19, x19, x28
+ umulh x28, x25, x10
+ adcs x20, x20, x28
+ umulh x28, x25, x15
+ adcs x21, x21, x28
+ mul x28, x22, x1
+ umulh x25, x25, x0
+ adcs x2, x2, x25
+ umulh x25, x22, x4
+ adds x25, x25, x28
+ umulh x28, x22, x1
+ adcs x26, x28, x26
+ umulh x28, x22, x18
+ adcs x24, x28, x24
+ umulh x28, x22, x6
+ adcs x23, x28, x23
+ mul x28, x22, x5
+ umulh x29, x22, x17
+ adcs x28, x29, x28
+ mul x29, x22, x16
+ umulh x30, x22, x5
+ adcs x29, x30, x29
+ umulh x30, x22, x16
+ mul x22, x22, x4
+ adcs x30, x30, xzr
+ adds x22, x22, x27
+ adcs x9, x25, x9
+ adcs x7, x26, x7
+ mul x25, x3, x5
+ adcs x19, x24, x19
+ mul x24, x3, x17
+ adcs x20, x23, x20
+ mul x23, x3, x6
+ adcs x21, x28, x21
+ mul x26, x22, x8
+ adcs x8, x29, x2
+ mul x27, x26, x14
+ adcs x28, x30, xzr
+ cmn x27, x22
+ mul x22, x3, x18
+ mul x27, x26, x13
+ adcs x9, x27, x9
+ mul x27, x26, x12
+ adcs x7, x27, x7
+ mul x27, x26, x11
+ adcs x19, x27, x19
+ mul x27, x26, x10
+ adcs x20, x27, x20
+ mul x27, x26, x15
+ adcs x21, x27, x21
+ mul x27, x26, x0
+ adcs x8, x27, x8
+ umulh x27, x26, x14
+ adcs x28, x28, xzr
+ adds x9, x9, x27
+ umulh x27, x26, x13
+ adcs x7, x7, x27
+ umulh x27, x26, x12
+ adcs x19, x19, x27
+ umulh x27, x26, x11
+ adcs x20, x20, x27
+ umulh x27, x26, x10
+ adcs x21, x21, x27
+ umulh x27, x26, x15
+ adcs x8, x8, x27
+ mul x27, x3, x1
+ umulh x26, x26, x0
+ adcs x26, x28, x26
+ umulh x28, x3, x4
+ adds x27, x28, x27
+ umulh x28, x3, x1
+ adcs x22, x28, x22
+ umulh x28, x3, x18
+ adcs x23, x28, x23
+ umulh x28, x3, x6
+ adcs x24, x28, x24
+ umulh x28, x3, x17
+ adcs x25, x28, x25
+ mul x28, x3, x16
+ umulh x29, x3, x5
+ adcs x28, x29, x28
+ ldp x2, x30, [sp, #16]
+ ldr x2, [x2, #48]
+ umulh x29, x3, x16
+ mul x3, x3, x4
+ adcs x29, x29, xzr
+ adds x9, x3, x9
+ adcs x3, x27, x7
+ umulh x7, x2, x16
+ mul x16, x2, x16
+ adcs x19, x22, x19
+ umulh x22, x2, x5
+ mul x5, x2, x5
+ adcs x20, x23, x20
+ umulh x23, x2, x17
+ mul x17, x2, x17
+ adcs x21, x24, x21
+ umulh x24, x2, x6
+ mul x6, x2, x6
+ adcs x8, x25, x8
+ mul x25, x9, x30
+ adcs x26, x28, x26
+ mul x27, x25, x14
+ adcs x28, x29, xzr
+ cmn x27, x9
+ umulh x9, x2, x18
+ mul x18, x2, x18
+ umulh x27, x2, x1
+ mul x1, x2, x1
+ umulh x29, x2, x4
+ mul x2, x2, x4
+ mul x4, x25, x13
+ adcs x3, x4, x3
+ mul x4, x25, x12
+ adcs x4, x4, x19
+ mul x19, x25, x11
+ adcs x19, x19, x20
+ mul x20, x25, x10
+ adcs x20, x20, x21
+ mul x21, x25, x15
+ adcs x8, x21, x8
+ mul x21, x25, x0
+ adcs x21, x21, x26
+ adcs x26, x28, xzr
+ umulh x28, x25, x14
+ adds x3, x3, x28
+ umulh x28, x25, x13
+ adcs x4, x4, x28
+ umulh x28, x25, x12
+ adcs x19, x19, x28
+ umulh x28, x25, x11
+ adcs x20, x20, x28
+ umulh x28, x25, x10
+ adcs x8, x8, x28
+ umulh x28, x25, x15
+ adcs x21, x21, x28
+ umulh x25, x25, x0
+ adcs x25, x26, x25
+ adds x1, x29, x1
+ adcs x18, x27, x18
+ adcs x9, x9, x6
+ adcs x17, x24, x17
+ adcs x5, x23, x5
+ adcs x16, x22, x16
+ adcs x6, x7, xzr
+ adds x2, x2, x3
+ adcs x1, x1, x4
+ adcs x18, x18, x19
+ adcs x9, x9, x20
+ adcs x8, x17, x8
+ adcs x17, x5, x21
+ mul x3, x2, x30
+ adcs x16, x16, x25
+ mul x4, x3, x14
+ adcs x5, x6, xzr
+ cmn x4, x2
+ mul x2, x3, x13
+ adcs x1, x2, x1
+ mul x2, x3, x12
+ adcs x18, x2, x18
+ mul x2, x3, x11
+ adcs x9, x2, x9
+ mul x2, x3, x10
+ adcs x8, x2, x8
+ mul x2, x3, x15
+ adcs x17, x2, x17
+ mul x2, x3, x0
+ adcs x16, x2, x16
+ umulh x2, x3, x14
+ adcs x4, x5, xzr
+ adds x1, x1, x2
+ umulh x2, x3, x13
+ adcs x18, x18, x2
+ umulh x2, x3, x12
+ adcs x9, x9, x2
+ umulh x2, x3, x11
+ adcs x8, x8, x2
+ umulh x2, x3, x10
+ adcs x17, x17, x2
+ umulh x2, x3, x15
+ adcs x16, x16, x2
+ umulh x2, x3, x0
+ adcs x2, x4, x2
+ subs x14, x1, x14
+ sbcs x13, x18, x13
+ sbcs x12, x9, x12
+ sbcs x11, x8, x11
+ sbcs x10, x17, x10
+ sbcs x15, x16, x15
+ sbcs x0, x2, x0
+ asr x3, x0, #63
+ cmp x3, #0 // =0
+ csel x14, x1, x14, lt
+ csel x13, x18, x13, lt
+ csel x9, x9, x12, lt
+ csel x8, x8, x11, lt
+ csel x10, x17, x10, lt
+ csel x11, x16, x15, lt
+ csel x12, x2, x0, lt
+ ldr x15, [sp, #8] // 8-byte Folded Reload
+ stp x14, x13, [x15]
+ stp x9, x8, [x15, #16]
+ stp x10, x11, [x15, #32]
+ str x12, [x15, #48]
+ add sp, sp, #32 // =32
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end99:
+ .size mcl_fp_montNF7L, .Lfunc_end99-mcl_fp_montNF7L
+
+ .globl mcl_fp_montRed7L
+ .align 2
+ .type mcl_fp_montRed7L,@function
+mcl_fp_montRed7L: // @mcl_fp_montRed7L
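+// Montgomery reduction: folds the 14-limb value at x1 down to 7 limbs modulo the prime at x2
+// (n' at [x2, #-8]), with a final conditional subtraction; result at x0.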
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ ldur x15, [x2, #-8]
+ ldp x9, x8, [x2, #40]
+ ldp x11, x10, [x2, #24]
+ ldp x13, x12, [x2, #8]
+ ldr x14, [x2]
+ ldp x17, x18, [x1, #96]
+ ldp x2, x3, [x1, #80]
+ ldp x4, x5, [x1, #64]
+ ldp x6, x7, [x1, #48]
+ ldp x19, x20, [x1, #32]
+ ldp x21, x22, [x1, #16]
+ ldp x16, x1, [x1]
+ mul x23, x16, x15
+ mul x24, x23, x8
+ mul x25, x23, x9
+ mul x26, x23, x10
+ mul x27, x23, x11
+ mul x28, x23, x12
+ mul x29, x23, x13
+ umulh x30, x23, x14
+ adds x29, x30, x29
+ umulh x30, x23, x13
+ adcs x28, x30, x28
+ umulh x30, x23, x12
+ adcs x27, x30, x27
+ umulh x30, x23, x11
+ adcs x26, x30, x26
+ umulh x30, x23, x10
+ adcs x25, x30, x25
+ umulh x30, x23, x9
+ adcs x24, x30, x24
+ umulh x30, x23, x8
+ mul x23, x23, x14
+ adcs x30, x30, xzr
+ cmn x16, x23
+ adcs x16, x1, x29
+ adcs x1, x21, x28
+ mul x21, x16, x15
+ adcs x22, x22, x27
+ mul x23, x21, x8
+ mul x27, x21, x9
+ mul x28, x21, x10
+ mul x29, x21, x11
+ adcs x19, x19, x26
+ mul x26, x21, x12
+ adcs x20, x20, x25
+ mul x25, x21, x13
+ adcs x6, x6, x24
+ umulh x24, x21, x14
+ adcs x7, x7, x30
+ adcs x4, x4, xzr
+ adcs x5, x5, xzr
+ adcs x2, x2, xzr
+ adcs x3, x3, xzr
+ adcs x17, x17, xzr
+ adcs x18, x18, xzr
+ adcs x30, xzr, xzr
+ adds x24, x24, x25
+ umulh x25, x21, x13
+ adcs x25, x25, x26
+ umulh x26, x21, x12
+ adcs x26, x26, x29
+ umulh x29, x21, x11
+ adcs x28, x29, x28
+ umulh x29, x21, x10
+ adcs x27, x29, x27
+ umulh x29, x21, x9
+ adcs x23, x29, x23
+ umulh x29, x21, x8
+ mul x21, x21, x14
+ adcs x29, x29, xzr
+ cmn x21, x16
+ adcs x16, x24, x1
+ adcs x1, x25, x22
+ mul x21, x16, x15
+ adcs x19, x26, x19
+ mul x22, x21, x8
+ mul x24, x21, x9
+ mul x25, x21, x10
+ adcs x20, x28, x20
+ mul x26, x21, x11
+ adcs x6, x27, x6
+ mul x27, x21, x12
+ adcs x7, x23, x7
+ mul x23, x21, x13
+ adcs x4, x29, x4
+ umulh x28, x21, x14
+ adcs x5, x5, xzr
+ adcs x2, x2, xzr
+ adcs x3, x3, xzr
+ adcs x17, x17, xzr
+ adcs x18, x18, xzr
+ adcs x29, x30, xzr
+ adds x23, x28, x23
+ umulh x28, x21, x13
+ adcs x27, x28, x27
+ umulh x28, x21, x12
+ adcs x26, x28, x26
+ umulh x28, x21, x11
+ adcs x25, x28, x25
+ umulh x28, x21, x10
+ adcs x24, x28, x24
+ umulh x28, x21, x9
+ adcs x22, x28, x22
+ umulh x28, x21, x8
+ mul x21, x21, x14
+ adcs x28, x28, xzr
+ cmn x21, x16
+ adcs x16, x23, x1
+ adcs x1, x27, x19
+ mul x19, x16, x15
+ adcs x20, x26, x20
+ mul x21, x19, x8
+ mul x23, x19, x9
+ mul x26, x19, x10
+ adcs x6, x25, x6
+ mul x25, x19, x11
+ adcs x7, x24, x7
+ mul x24, x19, x12
+ adcs x4, x22, x4
+ mul x22, x19, x13
+ adcs x5, x28, x5
+ umulh x27, x19, x14
+ adcs x2, x2, xzr
+ adcs x3, x3, xzr
+ adcs x17, x17, xzr
+ adcs x18, x18, xzr
+ adcs x28, x29, xzr
+ adds x22, x27, x22
+ umulh x27, x19, x13
+ adcs x24, x27, x24
+ umulh x27, x19, x12
+ adcs x25, x27, x25
+ umulh x27, x19, x11
+ adcs x26, x27, x26
+ umulh x27, x19, x10
+ adcs x23, x27, x23
+ umulh x27, x19, x9
+ adcs x21, x27, x21
+ umulh x27, x19, x8
+ mul x19, x19, x14
+ adcs x27, x27, xzr
+ cmn x19, x16
+ adcs x16, x22, x1
+ adcs x1, x24, x20
+ mul x19, x16, x15
+ adcs x6, x25, x6
+ mul x20, x19, x8
+ mul x22, x19, x9
+ mul x24, x19, x10
+ adcs x7, x26, x7
+ mul x25, x19, x11
+ adcs x4, x23, x4
+ mul x23, x19, x12
+ adcs x5, x21, x5
+ mul x21, x19, x13
+ adcs x2, x27, x2
+ umulh x26, x19, x14
+ adcs x3, x3, xzr
+ adcs x17, x17, xzr
+ adcs x18, x18, xzr
+ adcs x27, x28, xzr
+ adds x21, x26, x21
+ umulh x26, x19, x13
+ adcs x23, x26, x23
+ umulh x26, x19, x12
+ adcs x25, x26, x25
+ umulh x26, x19, x11
+ adcs x24, x26, x24
+ umulh x26, x19, x10
+ adcs x22, x26, x22
+ umulh x26, x19, x9
+ adcs x20, x26, x20
+ umulh x26, x19, x8
+ mul x19, x19, x14
+ adcs x26, x26, xzr
+ cmn x19, x16
+ adcs x16, x21, x1
+ adcs x1, x23, x6
+ mul x6, x16, x15
+ adcs x7, x25, x7
+ mul x19, x6, x8
+ mul x21, x6, x9
+ mul x23, x6, x10
+ adcs x4, x24, x4
+ mul x24, x6, x11
+ adcs x5, x22, x5
+ mul x22, x6, x12
+ adcs x2, x20, x2
+ mul x20, x6, x13
+ adcs x3, x26, x3
+ umulh x25, x6, x14
+ adcs x17, x17, xzr
+ adcs x18, x18, xzr
+ adcs x26, x27, xzr
+ adds x20, x25, x20
+ umulh x25, x6, x13
+ adcs x22, x25, x22
+ umulh x25, x6, x12
+ adcs x24, x25, x24
+ umulh x25, x6, x11
+ adcs x23, x25, x23
+ umulh x25, x6, x10
+ adcs x21, x25, x21
+ umulh x25, x6, x9
+ adcs x19, x25, x19
+ umulh x25, x6, x8
+ mul x6, x6, x14
+ adcs x25, x25, xzr
+ cmn x6, x16
+ adcs x16, x20, x1
+ adcs x1, x22, x7
+ mul x15, x16, x15
+ adcs x4, x24, x4
+ mul x6, x15, x8
+ mul x7, x15, x9
+ mul x20, x15, x10
+ adcs x5, x23, x5
+ mul x22, x15, x11
+ adcs x2, x21, x2
+ mul x21, x15, x12
+ adcs x3, x19, x3
+ mul x19, x15, x13
+ adcs x17, x25, x17
+ umulh x23, x15, x14
+ adcs x18, x18, xzr
+ adcs x24, x26, xzr
+ adds x19, x23, x19
+ umulh x23, x15, x13
+ adcs x21, x23, x21
+ umulh x23, x15, x12
+ adcs x22, x23, x22
+ umulh x23, x15, x11
+ adcs x20, x23, x20
+ umulh x23, x15, x10
+ adcs x7, x23, x7
+ umulh x23, x15, x9
+ adcs x6, x23, x6
+ umulh x23, x15, x8
+ mul x15, x15, x14
+ adcs x23, x23, xzr
+ cmn x15, x16
+ adcs x15, x19, x1
+ adcs x16, x21, x4
+ adcs x1, x22, x5
+ adcs x2, x20, x2
+ adcs x3, x7, x3
+ adcs x17, x6, x17
+ adcs x18, x23, x18
+ adcs x4, x24, xzr
+ subs x14, x15, x14
+ sbcs x13, x16, x13
+ sbcs x12, x1, x12
+ sbcs x11, x2, x11
+ sbcs x10, x3, x10
+ sbcs x9, x17, x9
+ sbcs x8, x18, x8
+ sbcs x4, x4, xzr
+ tst x4, #0x1
+ csel x14, x15, x14, ne
+ csel x13, x16, x13, ne
+ csel x12, x1, x12, ne
+ csel x11, x2, x11, ne
+ csel x10, x3, x10, ne
+ csel x9, x17, x9, ne
+ csel x8, x18, x8, ne
+ stp x14, x13, [x0]
+ stp x12, x11, [x0, #16]
+ stp x10, x9, [x0, #32]
+ str x8, [x0, #48]
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end100:
+ .size mcl_fp_montRed7L, .Lfunc_end100-mcl_fp_montRed7L
+
+ .globl mcl_fp_addPre7L
+ .align 2
+ .type mcl_fp_addPre7L,@function
+mcl_fp_addPre7L: // @mcl_fp_addPre7L
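+// z (x0) = x (x1) + y (x2) over seven 64-bit limbs with no modular
+// reduction; the final carry (0 or 1) is returned in x0.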
+// BB#0:
+ ldp x11, x8, [x2, #40]
+ ldp x13, x9, [x1, #40]
+ ldp x15, x10, [x2, #24]
+ ldp x17, x14, [x2, #8]
+ ldr x16, [x2]
+ ldp x18, x2, [x1]
+ ldr x3, [x1, #16]
+ ldp x1, x12, [x1, #24]
+ adds x16, x16, x18
+ str x16, [x0]
+ adcs x16, x17, x2
+ adcs x14, x14, x3
+ stp x16, x14, [x0, #8]
+ adcs x14, x15, x1
+ adcs x10, x10, x12
+ stp x14, x10, [x0, #24]
+ adcs x10, x11, x13
+ adcs x9, x8, x9
+ adcs x8, xzr, xzr
+ stp x10, x9, [x0, #40]
+ mov x0, x8
+ ret
+.Lfunc_end101:
+ .size mcl_fp_addPre7L, .Lfunc_end101-mcl_fp_addPre7L
+
+ .globl mcl_fp_subPre7L
+ .align 2
+ .type mcl_fp_subPre7L,@function
+mcl_fp_subPre7L: // @mcl_fp_subPre7L
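+// z (x0) = x (x1) - y (x2) over seven limbs with no reduction; the borrow
+// (0 or 1) is returned in x0.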
+// BB#0:
+ ldp x11, x8, [x2, #40]
+ ldp x13, x9, [x1, #40]
+ ldp x15, x10, [x2, #24]
+ ldp x17, x14, [x2, #8]
+ ldr x16, [x2]
+ ldp x18, x2, [x1]
+ ldr x3, [x1, #16]
+ ldp x1, x12, [x1, #24]
+ subs x16, x18, x16
+ str x16, [x0]
+ sbcs x16, x2, x17
+ sbcs x14, x3, x14
+ stp x16, x14, [x0, #8]
+ sbcs x14, x1, x15
+ sbcs x10, x12, x10
+ stp x14, x10, [x0, #24]
+ sbcs x10, x13, x11
+ sbcs x9, x9, x8
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ stp x10, x9, [x0, #40]
+ mov x0, x8
+ ret
+.Lfunc_end102:
+ .size mcl_fp_subPre7L, .Lfunc_end102-mcl_fp_subPre7L
+
+ .globl mcl_fp_shr1_7L
+ .align 2
+ .type mcl_fp_shr1_7L,@function
+mcl_fp_shr1_7L: // @mcl_fp_shr1_7L
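+// z (x0) = x (x1) >> 1: a 448-bit logical right shift by one bit.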
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x14, x10, [x1, #40]
+ ldp x11, x12, [x1, #16]
+ ldr x13, [x1, #32]
+ extr x8, x9, x8, #1
+ extr x9, x11, x9, #1
+ extr x11, x12, x11, #1
+ extr x12, x13, x12, #1
+ extr x13, x14, x13, #1
+ extr x14, x10, x14, #1
+ lsr x10, x10, #1
+ stp x8, x9, [x0]
+ stp x11, x12, [x0, #16]
+ stp x13, x14, [x0, #32]
+ str x10, [x0, #48]
+ ret
+.Lfunc_end103:
+ .size mcl_fp_shr1_7L, .Lfunc_end103-mcl_fp_shr1_7L
+
+ .globl mcl_fp_add7L
+ .align 2
+ .type mcl_fp_add7L,@function
+mcl_fp_add7L: // @mcl_fp_add7L
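+// Modular addition: z (x0) = x (x1) + y (x2); the modulus p (x3) is
+// subtracted again unless the trial subtraction of p borrows.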
+// BB#0:
+ ldp x11, x8, [x2, #40]
+ ldp x13, x9, [x1, #40]
+ ldp x15, x10, [x2, #24]
+ ldp x17, x14, [x2, #8]
+ ldr x16, [x2]
+ ldp x18, x2, [x1]
+ ldr x4, [x1, #16]
+ ldp x1, x12, [x1, #24]
+ adds x16, x16, x18
+ ldp x5, x18, [x3, #40]
+ adcs x17, x17, x2
+ adcs x2, x14, x4
+ ldr x4, [x3, #32]
+ adcs x15, x15, x1
+ adcs x10, x10, x12
+ ldp x12, x1, [x3]
+ stp x16, x17, [x0]
+ stp x2, x15, [x0, #16]
+ adcs x6, x11, x13
+ stp x10, x6, [x0, #32]
+ adcs x8, x8, x9
+ str x8, [x0, #48]
+ adcs x7, xzr, xzr
+ ldp x9, x11, [x3, #16]
+ subs x14, x16, x12
+ sbcs x13, x17, x1
+ sbcs x12, x2, x9
+ sbcs x11, x15, x11
+ sbcs x10, x10, x4
+ sbcs x9, x6, x5
+ sbcs x8, x8, x18
+ sbcs x15, x7, xzr
+ and w15, w15, #0x1
+ tbnz w15, #0, .LBB104_2
+// BB#1: // %nocarry
+ stp x14, x13, [x0]
+ stp x12, x11, [x0, #16]
+ stp x10, x9, [x0, #32]
+ str x8, [x0, #48]
+.LBB104_2: // %carry
+ ret
+.Lfunc_end104:
+ .size mcl_fp_add7L, .Lfunc_end104-mcl_fp_add7L
+
+ .globl mcl_fp_addNF7L
+ .align 2
+ .type mcl_fp_addNF7L,@function
+mcl_fp_addNF7L: // @mcl_fp_addNF7L
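+// Addition where the conditional subtraction of p (x3) is selected by the
+// sign of the trial value x + y - p rather than by a carry flag.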
+// BB#0:
+ ldp x11, x8, [x1, #40]
+ ldp x13, x9, [x2, #40]
+ ldp x15, x10, [x1, #24]
+ ldp x17, x14, [x1, #8]
+ ldr x16, [x1]
+ ldp x18, x1, [x2]
+ ldr x4, [x2, #16]
+ ldp x2, x12, [x2, #24]
+ adds x16, x18, x16
+ adcs x17, x1, x17
+ adcs x14, x4, x14
+ ldp x4, x18, [x3, #40]
+ adcs x15, x2, x15
+ adcs x10, x12, x10
+ ldp x12, x2, [x3]
+ adcs x11, x13, x11
+ ldr x13, [x3, #16]
+ ldp x3, x1, [x3, #24]
+ adcs x8, x9, x8
+ subs x9, x16, x12
+ sbcs x12, x17, x2
+ sbcs x13, x14, x13
+ sbcs x2, x15, x3
+ sbcs x1, x10, x1
+ sbcs x3, x11, x4
+ sbcs x18, x8, x18
+ asr x4, x18, #63
+ cmp x4, #0 // =0
+ csel x9, x16, x9, lt
+ csel x12, x17, x12, lt
+ csel x13, x14, x13, lt
+ csel x14, x15, x2, lt
+ csel x10, x10, x1, lt
+ csel x11, x11, x3, lt
+ csel x8, x8, x18, lt
+ stp x9, x12, [x0]
+ stp x13, x14, [x0, #16]
+ stp x10, x11, [x0, #32]
+ str x8, [x0, #48]
+ ret
+.Lfunc_end105:
+ .size mcl_fp_addNF7L, .Lfunc_end105-mcl_fp_addNF7L
+
+ .globl mcl_fp_sub7L
+ .align 2
+ .type mcl_fp_sub7L,@function
+mcl_fp_sub7L: // @mcl_fp_sub7L
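+// Modular subtraction: z (x0) = x (x1) - y (x2); p (x3) is added back when
+// the raw subtraction borrows.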
+// BB#0:
+ ldp x13, x14, [x2, #40]
+ ldp x17, x15, [x1, #40]
+ ldp x11, x12, [x2, #24]
+ ldp x9, x10, [x2, #8]
+ ldr x8, [x2]
+ ldp x18, x2, [x1]
+ ldr x4, [x1, #16]
+ ldp x1, x16, [x1, #24]
+ subs x8, x18, x8
+ sbcs x9, x2, x9
+ stp x8, x9, [x0]
+ sbcs x10, x4, x10
+ sbcs x11, x1, x11
+ stp x10, x11, [x0, #16]
+ sbcs x12, x16, x12
+ sbcs x13, x17, x13
+ stp x12, x13, [x0, #32]
+ sbcs x14, x15, x14
+ str x14, [x0, #48]
+ ngcs x15, xzr
+ and w15, w15, #0x1
+ tbnz w15, #0, .LBB106_2
+// BB#1: // %nocarry
+ ret
+.LBB106_2: // %carry
+ ldp x16, x17, [x3]
+ ldp x18, x1, [x3, #16]
+ ldr x2, [x3, #32]
+ ldp x3, x15, [x3, #40]
+ adds x8, x16, x8
+ adcs x9, x17, x9
+ adcs x10, x18, x10
+ adcs x11, x1, x11
+ adcs x12, x2, x12
+ adcs x13, x3, x13
+ adcs x14, x15, x14
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ stp x12, x13, [x0, #32]
+ str x14, [x0, #48]
+ ret
+.Lfunc_end106:
+ .size mcl_fp_sub7L, .Lfunc_end106-mcl_fp_sub7L
+
+ .globl mcl_fp_subNF7L
+ .align 2
+ .type mcl_fp_subNF7L,@function
+mcl_fp_subNF7L: // @mcl_fp_subNF7L
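+// Subtraction where p (x3), masked by the sign of x - y, is added back
+// without branching.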
+// BB#0:
+ ldp x11, x8, [x2, #40]
+ ldp x13, x9, [x1, #40]
+ ldp x15, x10, [x2, #24]
+ ldp x17, x14, [x2, #8]
+ ldr x16, [x2]
+ ldp x18, x2, [x1]
+ ldr x4, [x1, #16]
+ ldp x1, x12, [x1, #24]
+ subs x16, x18, x16
+ sbcs x17, x2, x17
+ sbcs x14, x4, x14
+ ldp x4, x18, [x3, #40]
+ sbcs x15, x1, x15
+ sbcs x10, x12, x10
+ ldp x12, x1, [x3]
+ sbcs x11, x13, x11
+ ldr x13, [x3, #16]
+ ldp x3, x2, [x3, #24]
+ sbcs x8, x9, x8
+ asr x9, x8, #63
+ and x1, x9, x1
+ and x13, x9, x13
+ and x3, x9, x3
+ and x2, x9, x2
+ and x4, x9, x4
+ and x18, x9, x18
+ extr x9, x9, x8, #63
+ and x9, x9, x12
+ adds x9, x9, x16
+ str x9, [x0]
+ adcs x9, x1, x17
+ str x9, [x0, #8]
+ adcs x9, x13, x14
+ str x9, [x0, #16]
+ adcs x9, x3, x15
+ str x9, [x0, #24]
+ adcs x9, x2, x10
+ str x9, [x0, #32]
+ adcs x9, x4, x11
+ adcs x8, x18, x8
+ stp x9, x8, [x0, #40]
+ ret
+.Lfunc_end107:
+ .size mcl_fp_subNF7L, .Lfunc_end107-mcl_fp_subNF7L
+
+ .globl mcl_fpDbl_add7L
+ .align 2
+ .type mcl_fpDbl_add7L,@function
+mcl_fpDbl_add7L: // @mcl_fpDbl_add7L
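+// Adds two 14-limb (double-width) values: the low seven limbs are stored
+// unreduced and the high half is reduced modulo p (x3) by a conditional
+// subtraction.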
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ ldp x8, x9, [x2, #96]
+ ldp x10, x11, [x1, #96]
+ ldp x12, x13, [x2, #80]
+ ldp x14, x15, [x1, #80]
+ ldp x16, x17, [x2, #64]
+ ldp x18, x4, [x1, #64]
+ ldp x5, x6, [x2, #48]
+ ldp x7, x19, [x1, #48]
+ ldp x20, x21, [x2, #32]
+ ldp x22, x23, [x1, #32]
+ ldp x24, x25, [x2, #16]
+ ldp x27, x2, [x2]
+ ldp x28, x29, [x1, #16]
+ ldp x26, x1, [x1]
+ adds x26, x27, x26
+ ldr x27, [x3, #48]
+ str x26, [x0]
+ adcs x1, x2, x1
+ ldp x2, x26, [x3, #32]
+ str x1, [x0, #8]
+ adcs x1, x24, x28
+ ldp x24, x28, [x3, #16]
+ str x1, [x0, #16]
+ ldp x1, x3, [x3]
+ adcs x25, x25, x29
+ adcs x20, x20, x22
+ stp x25, x20, [x0, #24]
+ adcs x20, x21, x23
+ adcs x5, x5, x7
+ stp x20, x5, [x0, #40]
+ adcs x5, x6, x19
+ adcs x16, x16, x18
+ adcs x17, x17, x4
+ adcs x12, x12, x14
+ adcs x13, x13, x15
+ adcs x8, x8, x10
+ adcs x9, x9, x11
+ adcs x10, xzr, xzr
+ subs x11, x5, x1
+ sbcs x14, x16, x3
+ sbcs x15, x17, x24
+ sbcs x18, x12, x28
+ sbcs x1, x13, x2
+ sbcs x2, x8, x26
+ sbcs x3, x9, x27
+ sbcs x10, x10, xzr
+ tst x10, #0x1
+ csel x10, x5, x11, ne
+ csel x11, x16, x14, ne
+ csel x14, x17, x15, ne
+ csel x12, x12, x18, ne
+ csel x13, x13, x1, ne
+ csel x8, x8, x2, ne
+ csel x9, x9, x3, ne
+ stp x10, x11, [x0, #56]
+ stp x14, x12, [x0, #72]
+ stp x13, x8, [x0, #88]
+ str x9, [x0, #104]
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end108:
+ .size mcl_fpDbl_add7L, .Lfunc_end108-mcl_fpDbl_add7L
+
+ .globl mcl_fpDbl_sub7L
+ .align 2
+ .type mcl_fpDbl_sub7L,@function
+mcl_fpDbl_sub7L: // @mcl_fpDbl_sub7L
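+// Subtracts two 14-limb values: p (x3), masked by the borrow, is added back
+// into the high seven limbs.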
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ ldp x9, x8, [x2, #96]
+ ldp x11, x10, [x1, #96]
+ ldp x12, x13, [x2, #80]
+ ldp x14, x15, [x1, #80]
+ ldp x16, x17, [x2, #64]
+ ldp x18, x4, [x1, #64]
+ ldp x5, x6, [x2, #48]
+ ldp x7, x19, [x1, #48]
+ ldp x20, x21, [x2, #32]
+ ldp x22, x23, [x1, #32]
+ ldp x24, x25, [x2, #16]
+ ldp x26, x2, [x2]
+ ldp x28, x29, [x1, #16]
+ ldp x27, x1, [x1]
+ subs x26, x27, x26
+ ldr x27, [x3, #48]
+ str x26, [x0]
+ sbcs x1, x1, x2
+ ldp x2, x26, [x3, #32]
+ str x1, [x0, #8]
+ sbcs x1, x28, x24
+ ldp x24, x28, [x3, #16]
+ str x1, [x0, #16]
+ ldp x1, x3, [x3]
+ sbcs x25, x29, x25
+ sbcs x20, x22, x20
+ stp x25, x20, [x0, #24]
+ sbcs x20, x23, x21
+ sbcs x5, x7, x5
+ stp x20, x5, [x0, #40]
+ sbcs x5, x19, x6
+ sbcs x16, x18, x16
+ sbcs x17, x4, x17
+ sbcs x12, x14, x12
+ sbcs x13, x15, x13
+ sbcs x9, x11, x9
+ sbcs x8, x10, x8
+ ngcs x10, xzr
+ tst x10, #0x1
+ csel x10, x27, xzr, ne
+ csel x11, x26, xzr, ne
+ csel x14, x2, xzr, ne
+ csel x15, x28, xzr, ne
+ csel x18, x24, xzr, ne
+ csel x2, x3, xzr, ne
+ csel x1, x1, xzr, ne
+ adds x1, x1, x5
+ adcs x16, x2, x16
+ stp x1, x16, [x0, #56]
+ adcs x16, x18, x17
+ adcs x12, x15, x12
+ stp x16, x12, [x0, #72]
+ adcs x12, x14, x13
+ adcs x9, x11, x9
+ stp x12, x9, [x0, #88]
+ adcs x8, x10, x8
+ str x8, [x0, #104]
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end109:
+ .size mcl_fpDbl_sub7L, .Lfunc_end109-mcl_fpDbl_sub7L
+
+ .align 2
+ .type .LmulPv512x64,@function
+.LmulPv512x64: // @mulPv512x64
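+// Internal helper: multiplies the eight-limb value at x0 by the 64-bit
+// scalar in x1 and stores the nine-limb product at x8.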
+// BB#0:
+ ldr x9, [x0]
+ mul x10, x9, x1
+ str x10, [x8]
+ ldr x10, [x0, #8]
+ umulh x9, x9, x1
+ mul x11, x10, x1
+ adds x9, x9, x11
+ str x9, [x8, #8]
+ ldr x9, [x0, #16]
+ umulh x10, x10, x1
+ mul x11, x9, x1
+ adcs x10, x10, x11
+ str x10, [x8, #16]
+ ldr x10, [x0, #24]
+ umulh x9, x9, x1
+ mul x11, x10, x1
+ adcs x9, x9, x11
+ str x9, [x8, #24]
+ ldr x9, [x0, #32]
+ umulh x10, x10, x1
+ mul x11, x9, x1
+ adcs x10, x10, x11
+ str x10, [x8, #32]
+ ldr x10, [x0, #40]
+ umulh x9, x9, x1
+ mul x11, x10, x1
+ adcs x9, x9, x11
+ str x9, [x8, #40]
+ ldr x9, [x0, #48]
+ umulh x10, x10, x1
+ mul x11, x9, x1
+ adcs x10, x10, x11
+ str x10, [x8, #48]
+ ldr x10, [x0, #56]
+ umulh x9, x9, x1
+ mul x11, x10, x1
+ umulh x10, x10, x1
+ adcs x9, x9, x11
+ str x9, [x8, #56]
+ adcs x9, x10, xzr
+ str x9, [x8, #64]
+ ret
+.Lfunc_end110:
+ .size .LmulPv512x64, .Lfunc_end110-.LmulPv512x64
+
+ .globl mcl_fp_mulUnitPre8L
+ .align 2
+ .type mcl_fp_mulUnitPre8L,@function
+mcl_fp_mulUnitPre8L: // @mcl_fp_mulUnitPre8L
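+// z (x0) = x (x1) * y (x2, a single 64-bit word): nine-limb result computed
+// through .LmulPv512x64.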
+// BB#0:
+ stp x20, x19, [sp, #-32]!
+ stp x29, x30, [sp, #16]
+ add x29, sp, #16 // =16
+ sub sp, sp, #80 // =80
+ mov x19, x0
+ mov x8, sp
+ mov x0, x1
+ mov x1, x2
+ bl .LmulPv512x64
+ ldp x9, x8, [sp, #56]
+ ldp x11, x10, [sp, #40]
+ ldp x16, x12, [sp, #24]
+ ldp x13, x14, [sp]
+ ldr x15, [sp, #16]
+ stp x13, x14, [x19]
+ stp x15, x16, [x19, #16]
+ stp x12, x11, [x19, #32]
+ stp x10, x9, [x19, #48]
+ str x8, [x19, #64]
+ sub sp, x29, #16 // =16
+ ldp x29, x30, [sp, #16]
+ ldp x20, x19, [sp], #32
+ ret
+.Lfunc_end111:
+ .size mcl_fp_mulUnitPre8L, .Lfunc_end111-mcl_fp_mulUnitPre8L
+
+ .globl mcl_fpDbl_mulPre8L
+ .align 2
+ .type mcl_fpDbl_mulPre8L,@function
+mcl_fpDbl_mulPre8L: // @mcl_fpDbl_mulPre8L
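+// Full 512-bit x 512-bit multiplication (sixteen-limb result) assembled from
+// three calls to mcl_fpDbl_mulPre4L, i.e. one Karatsuba level.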
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #144 // =144
+ mov x20, x2
+ mov x21, x1
+ mov x19, x0
+ bl mcl_fpDbl_mulPre4L
+ add x0, x19, #64 // =64
+ add x1, x21, #32 // =32
+ add x2, x20, #32 // =32
+ bl mcl_fpDbl_mulPre4L
+ ldp x8, x9, [x20, #48]
+ ldp x10, x11, [x20, #32]
+ ldp x12, x13, [x20]
+ ldp x14, x15, [x20, #16]
+ adds x18, x12, x10
+ str x18, [sp, #8] // 8-byte Folded Spill
+ ldp x10, x12, [x21, #16]
+ ldp x16, x17, [x21, #48]
+ adcs x22, x13, x11
+ ldp x11, x13, [x21]
+ adcs x23, x14, x8
+ ldp x8, x14, [x21, #32]
+ stp x18, x22, [sp, #16]
+ adcs x21, x15, x9
+ stp x23, x21, [sp, #32]
+ adcs x24, xzr, xzr
+ adds x25, x11, x8
+ adcs x26, x13, x14
+ stp x25, x26, [sp, #48]
+ adcs x27, x10, x16
+ adcs x28, x12, x17
+ stp x27, x28, [sp, #64]
+ adcs x20, xzr, xzr
+ add x0, sp, #80 // =80
+ add x1, sp, #48 // =48
+ add x2, sp, #16 // =16
+ bl mcl_fpDbl_mulPre4L
+ cmp x24, #0 // =0
+ csel x8, x28, xzr, ne
+ and x9, x24, x20
+ ldp x11, x10, [sp, #128]
+ ldp x13, x12, [sp, #112]
+ ldp x14, x15, [x19, #48]
+ ldp x16, x17, [x19, #32]
+ ldp x18, x0, [x19, #16]
+ csel x1, x27, xzr, ne
+ csel x2, x26, xzr, ne
+ csel x3, x25, xzr, ne
+ cmp x20, #0 // =0
+ ldp x4, x5, [x19]
+ csel x6, x21, xzr, ne
+ csel x7, x23, xzr, ne
+ csel x20, x22, xzr, ne
+ ldr x21, [sp, #8] // 8-byte Folded Reload
+ csel x21, x21, xzr, ne
+ adds x3, x21, x3
+ adcs x2, x20, x2
+ ldp x20, x21, [sp, #96]
+ adcs x1, x7, x1
+ adcs x8, x6, x8
+ adcs x6, xzr, xzr
+ adds x13, x3, x13
+ ldp x3, x7, [sp, #80]
+ adcs x12, x2, x12
+ adcs x11, x1, x11
+ ldp x1, x2, [x19, #112]
+ adcs x8, x8, x10
+ adcs x9, x6, x9
+ ldp x10, x6, [x19, #96]
+ subs x3, x3, x4
+ sbcs x4, x7, x5
+ ldp x5, x7, [x19, #80]
+ sbcs x18, x20, x18
+ sbcs x0, x21, x0
+ ldp x20, x21, [x19, #64]
+ sbcs x13, x13, x16
+ sbcs x12, x12, x17
+ sbcs x11, x11, x14
+ sbcs x8, x8, x15
+ sbcs x9, x9, xzr
+ subs x3, x3, x20
+ sbcs x4, x4, x21
+ sbcs x18, x18, x5
+ sbcs x0, x0, x7
+ sbcs x13, x13, x10
+ sbcs x12, x12, x6
+ sbcs x11, x11, x1
+ sbcs x8, x8, x2
+ sbcs x9, x9, xzr
+ adds x16, x16, x3
+ str x16, [x19, #32]
+ adcs x16, x17, x4
+ adcs x14, x14, x18
+ stp x16, x14, [x19, #40]
+ adcs x14, x15, x0
+ adcs x13, x20, x13
+ stp x14, x13, [x19, #56]
+ adcs x12, x21, x12
+ adcs x11, x5, x11
+ stp x12, x11, [x19, #72]
+ adcs x8, x7, x8
+ str x8, [x19, #88]
+ adcs x8, x10, x9
+ str x8, [x19, #96]
+ adcs x8, x6, xzr
+ str x8, [x19, #104]
+ adcs x8, x1, xzr
+ str x8, [x19, #112]
+ adcs x8, x2, xzr
+ str x8, [x19, #120]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end112:
+ .size mcl_fpDbl_mulPre8L, .Lfunc_end112-mcl_fpDbl_mulPre8L
+
+ .globl mcl_fpDbl_sqrPre8L
+ .align 2
+ .type mcl_fpDbl_sqrPre8L,@function
+mcl_fpDbl_sqrPre8L: // @mcl_fpDbl_sqrPre8L
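+// Full 512-bit squaring using the same one-level Karatsuba structure as
+// mcl_fpDbl_mulPre8L.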
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #128 // =128
+ mov x20, x1
+ mov x19, x0
+ mov x2, x20
+ bl mcl_fpDbl_mulPre4L
+ add x0, x19, #64 // =64
+ add x1, x20, #32 // =32
+ mov x2, x1
+ bl mcl_fpDbl_mulPre4L
+ ldp x8, x9, [x20, #16]
+ ldp x10, x11, [x20, #32]
+ ldp x12, x13, [x20]
+ ldp x14, x15, [x20, #48]
+ adds x22, x12, x10
+ adcs x23, x13, x11
+ adcs x20, x8, x14
+ adcs x21, x9, x15
+ stp x22, x23, [sp, #32]
+ stp x22, x23, [sp]
+ stp x20, x21, [sp, #48]
+ stp x20, x21, [sp, #16]
+ adcs x24, xzr, xzr
+ add x0, sp, #64 // =64
+ add x1, sp, #32 // =32
+ mov x2, sp
+ bl mcl_fpDbl_mulPre4L
+ ldp x8, x9, [x19, #48]
+ ldp x10, x11, [x19]
+ ldp x12, x13, [sp, #64]
+ ldp x14, x15, [x19, #16]
+ ldp x16, x17, [sp, #80]
+ ldp x18, x0, [x19, #32]
+ subs x10, x12, x10
+ ldp x1, x12, [sp, #96]
+ sbcs x11, x13, x11
+ sbcs x14, x16, x14
+ ldp x13, x16, [sp, #112]
+ sbcs x15, x17, x15
+ sbcs x17, x1, x18
+ ldp x1, x2, [x19, #64]
+ ldp x3, x4, [x19, #80]
+ ldp x5, x6, [x19, #96]
+ ldp x7, x25, [x19, #112]
+ lsr x26, x21, #63
+ sbcs x12, x12, x0
+ sbcs x13, x13, x8
+ sbcs x16, x16, x9
+ sbcs x27, x24, xzr
+ subs x10, x10, x1
+ sbcs x11, x11, x2
+ sbcs x14, x14, x3
+ sbcs x15, x15, x4
+ sbcs x17, x17, x5
+ sbcs x12, x12, x6
+ sbcs x13, x13, x7
+ sbcs x16, x16, x25
+ sbcs x27, x27, xzr
+ adds x22, x22, x22
+ adcs x23, x23, x23
+ adcs x20, x20, x20
+ adcs x21, x21, x21
+ cmp x24, #0 // =0
+ csel x24, x26, xzr, ne
+ csel x21, x21, xzr, ne
+ csel x20, x20, xzr, ne
+ csel x23, x23, xzr, ne
+ csel x22, x22, xzr, ne
+ adds x17, x17, x22
+ adcs x12, x12, x23
+ adcs x13, x13, x20
+ adcs x16, x16, x21
+ adcs x20, x27, x24
+ adds x10, x10, x18
+ str x10, [x19, #32]
+ adcs x10, x11, x0
+ adcs x8, x14, x8
+ stp x10, x8, [x19, #40]
+ adcs x8, x15, x9
+ str x8, [x19, #56]
+ adcs x8, x17, x1
+ str x8, [x19, #64]
+ adcs x8, x12, x2
+ str x8, [x19, #72]
+ adcs x8, x13, x3
+ str x8, [x19, #80]
+ adcs x8, x16, x4
+ str x8, [x19, #88]
+ adcs x8, x20, x5
+ str x8, [x19, #96]
+ adcs x8, x6, xzr
+ str x8, [x19, #104]
+ adcs x8, x7, xzr
+ str x8, [x19, #112]
+ adcs x8, x25, xzr
+ str x8, [x19, #120]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end113:
+ .size mcl_fpDbl_sqrPre8L, .Lfunc_end113-mcl_fpDbl_sqrPre8L
+
+ .globl mcl_fp_mont8L
+ .align 2
+ .type mcl_fp_mont8L,@function
+mcl_fp_mont8L: // @mcl_fp_mont8L
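+// Word-by-word Montgomery multiplication over eight limbs: each round adds
+// x * y[i] plus a multiple of p (x3) derived from the 64-bit constant stored
+// eight bytes before the modulus, and a final conditional subtraction of p
+// normalizes the result.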
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #1424 // =1424
+ mov x20, x3
+ mov x26, x2
+ str x26, [sp, #120] // 8-byte Folded Spill
+ ldur x19, [x20, #-8]
+ str x19, [sp, #136] // 8-byte Folded Spill
+ ldr x9, [x26]
+ mov x27, x1
+ str x27, [sp, #128] // 8-byte Folded Spill
+ str x0, [sp, #112] // 8-byte Folded Spill
+ sub x8, x29, #160 // =160
+ mov x0, x27
+ mov x1, x9
+ bl .LmulPv512x64
+ ldur x24, [x29, #-160]
+ ldur x8, [x29, #-96]
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldur x8, [x29, #-104]
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldur x8, [x29, #-112]
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldur x8, [x29, #-120]
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldur x8, [x29, #-128]
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldur x8, [x29, #-136]
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldur x8, [x29, #-144]
+ str x8, [sp, #56] // 8-byte Folded Spill
+ ldur x8, [x29, #-152]
+ str x8, [sp, #48] // 8-byte Folded Spill
+ mul x1, x24, x19
+ sub x8, x29, #240 // =240
+ mov x0, x20
+ bl .LmulPv512x64
+ ldur x8, [x29, #-176]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldur x8, [x29, #-184]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldur x8, [x29, #-192]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldp x19, x28, [x29, #-208]
+ ldp x21, x23, [x29, #-224]
+ ldp x25, x22, [x29, #-240]
+ ldr x1, [x26, #8]
+ add x8, sp, #1184 // =1184
+ mov x0, x27
+ bl .LmulPv512x64
+ cmn x25, x24
+ ldr x8, [sp, #1248]
+ ldr x9, [sp, #1240]
+ ldp x10, x12, [sp, #48]
+ adcs x10, x22, x10
+ ldr x11, [sp, #1232]
+ adcs x12, x21, x12
+ ldr x13, [sp, #1224]
+ ldp x14, x16, [sp, #64]
+ adcs x14, x23, x14
+ ldr x15, [sp, #1216]
+ adcs x16, x19, x16
+ ldr x17, [sp, #1208]
+ ldp x18, x1, [sp, #80]
+ adcs x18, x28, x18
+ ldr x0, [sp, #1200]
+ ldp x2, x4, [sp, #24]
+ adcs x1, x2, x1
+ ldr x2, [sp, #1184]
+ ldp x3, x5, [sp, #96]
+ adcs x3, x4, x3
+ ldr x4, [sp, #1192]
+ ldr x6, [sp, #40] // 8-byte Folded Reload
+ adcs x5, x6, x5
+ adcs x6, xzr, xzr
+ adds x19, x10, x2
+ adcs x10, x12, x4
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x0
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ adcs x8, x6, x8
+ stp x8, x9, [sp, #96]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #48]
+ ldr x22, [sp, #136] // 8-byte Folded Reload
+ mul x1, x19, x22
+ add x8, sp, #1104 // =1104
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #1168]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #1160]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #1152]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x8, [sp, #1144]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldr x25, [sp, #1136]
+ ldr x26, [sp, #1128]
+ ldr x27, [sp, #1120]
+ ldr x21, [sp, #1112]
+ ldr x28, [sp, #1104]
+ ldp x24, x23, [sp, #120]
+ ldr x1, [x24, #16]
+ add x8, sp, #1024 // =1024
+ mov x0, x23
+ bl .LmulPv512x64
+ cmn x19, x28
+ ldr x8, [sp, #1088]
+ ldr x9, [sp, #1080]
+ ldr x10, [sp, #40] // 8-byte Folded Reload
+ adcs x10, x10, x21
+ ldr x11, [sp, #1072]
+ ldp x14, x12, [sp, #80]
+ adcs x12, x12, x27
+ ldr x13, [sp, #1064]
+ adcs x14, x14, x26
+ ldr x15, [sp, #1056]
+ ldp x18, x16, [sp, #64]
+ adcs x16, x16, x25
+ ldr x17, [sp, #1048]
+ ldp x0, x2, [sp, #8]
+ adcs x18, x18, x0
+ ldr x0, [sp, #1040]
+ ldr x1, [sp, #56] // 8-byte Folded Reload
+ adcs x1, x1, x2
+ ldr x2, [sp, #1024]
+ ldp x5, x3, [sp, #96]
+ ldp x4, x6, [sp, #24]
+ adcs x3, x3, x4
+ ldr x4, [sp, #1032]
+ adcs x5, x5, x6
+ ldr x6, [sp, #48] // 8-byte Folded Reload
+ adcs x6, x6, xzr
+ adds x19, x10, x2
+ adcs x10, x12, x4
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x0
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ adcs x8, x6, x8
+ stp x8, x9, [sp, #96]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #48]
+ mul x1, x19, x22
+ add x8, sp, #944 // =944
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #1008]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #1000]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #992]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x8, [sp, #984]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldr x25, [sp, #976]
+ ldr x26, [sp, #968]
+ ldr x27, [sp, #960]
+ ldr x21, [sp, #952]
+ ldr x28, [sp, #944]
+ mov x22, x24
+ ldr x1, [x22, #24]
+ add x8, sp, #864 // =864
+ mov x0, x23
+ bl .LmulPv512x64
+ cmn x19, x28
+ ldr x8, [sp, #928]
+ ldr x9, [sp, #920]
+ ldr x10, [sp, #40] // 8-byte Folded Reload
+ adcs x10, x10, x21
+ ldr x11, [sp, #912]
+ ldp x14, x12, [sp, #80]
+ adcs x12, x12, x27
+ ldr x13, [sp, #904]
+ adcs x14, x14, x26
+ ldr x15, [sp, #896]
+ ldp x18, x16, [sp, #64]
+ adcs x16, x16, x25
+ ldr x17, [sp, #888]
+ ldp x0, x2, [sp, #8]
+ adcs x18, x18, x0
+ ldr x0, [sp, #880]
+ ldr x1, [sp, #56] // 8-byte Folded Reload
+ adcs x1, x1, x2
+ ldr x2, [sp, #864]
+ ldp x5, x3, [sp, #96]
+ ldp x4, x6, [sp, #24]
+ adcs x3, x3, x4
+ ldr x4, [sp, #872]
+ adcs x5, x5, x6
+ ldr x6, [sp, #48] // 8-byte Folded Reload
+ adcs x6, x6, xzr
+ adds x19, x10, x2
+ adcs x10, x12, x4
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x0
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ adcs x8, x6, x8
+ stp x8, x9, [sp, #96]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #48]
+ ldr x23, [sp, #136] // 8-byte Folded Reload
+ mul x1, x19, x23
+ add x8, sp, #784 // =784
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #848]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #840]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #832]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x24, [sp, #824]
+ ldr x25, [sp, #816]
+ ldr x26, [sp, #808]
+ ldr x27, [sp, #800]
+ ldr x21, [sp, #792]
+ ldr x28, [sp, #784]
+ ldr x1, [x22, #32]
+ add x8, sp, #704 // =704
+ ldr x22, [sp, #128] // 8-byte Folded Reload
+ mov x0, x22
+ bl .LmulPv512x64
+ cmn x19, x28
+ ldr x8, [sp, #768]
+ ldr x9, [sp, #760]
+ ldr x10, [sp, #40] // 8-byte Folded Reload
+ adcs x10, x10, x21
+ ldr x11, [sp, #752]
+ ldp x14, x12, [sp, #80]
+ adcs x12, x12, x27
+ ldr x13, [sp, #744]
+ adcs x14, x14, x26
+ ldr x15, [sp, #736]
+ ldp x18, x16, [sp, #64]
+ adcs x16, x16, x25
+ ldr x17, [sp, #728]
+ adcs x18, x18, x24
+ ldr x0, [sp, #720]
+ ldr x1, [sp, #56] // 8-byte Folded Reload
+ ldp x2, x4, [sp, #16]
+ adcs x1, x1, x2
+ ldr x2, [sp, #704]
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x4
+ ldr x4, [sp, #712]
+ ldr x6, [sp, #32] // 8-byte Folded Reload
+ adcs x5, x5, x6
+ ldr x6, [sp, #48] // 8-byte Folded Reload
+ adcs x6, x6, xzr
+ adds x19, x10, x2
+ adcs x10, x12, x4
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x0
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ adcs x8, x6, x8
+ stp x8, x9, [sp, #96]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #48]
+ mul x1, x19, x23
+ add x8, sp, #624 // =624
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #688]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #680]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #672]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x24, [sp, #664]
+ ldr x25, [sp, #656]
+ ldr x26, [sp, #648]
+ ldr x27, [sp, #640]
+ ldr x21, [sp, #632]
+ ldr x28, [sp, #624]
+ ldr x23, [sp, #120] // 8-byte Folded Reload
+ ldr x1, [x23, #40]
+ add x8, sp, #544 // =544
+ mov x0, x22
+ bl .LmulPv512x64
+ cmn x19, x28
+ ldr x8, [sp, #608]
+ ldr x9, [sp, #600]
+ ldr x10, [sp, #40] // 8-byte Folded Reload
+ adcs x10, x10, x21
+ ldr x11, [sp, #592]
+ ldp x14, x12, [sp, #80]
+ adcs x12, x12, x27
+ ldr x13, [sp, #584]
+ adcs x14, x14, x26
+ ldr x15, [sp, #576]
+ ldp x18, x16, [sp, #64]
+ adcs x16, x16, x25
+ ldr x17, [sp, #568]
+ adcs x18, x18, x24
+ ldr x0, [sp, #560]
+ ldr x1, [sp, #56] // 8-byte Folded Reload
+ ldp x2, x4, [sp, #16]
+ adcs x1, x1, x2
+ ldr x2, [sp, #544]
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x4
+ ldr x4, [sp, #552]
+ ldr x6, [sp, #32] // 8-byte Folded Reload
+ adcs x5, x5, x6
+ ldr x6, [sp, #48] // 8-byte Folded Reload
+ adcs x6, x6, xzr
+ adds x19, x10, x2
+ adcs x10, x12, x4
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x0
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ adcs x8, x6, x8
+ stp x8, x9, [sp, #96]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #48]
+ ldr x22, [sp, #136] // 8-byte Folded Reload
+ mul x1, x19, x22
+ add x8, sp, #464 // =464
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #528]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #520]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #512]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldp x25, x24, [sp, #496]
+ ldp x27, x26, [sp, #480]
+ ldp x28, x21, [sp, #464]
+ ldr x1, [x23, #48]
+ add x8, sp, #384 // =384
+ ldr x23, [sp, #128] // 8-byte Folded Reload
+ mov x0, x23
+ bl .LmulPv512x64
+ cmn x19, x28
+ ldp x9, x8, [sp, #440]
+ ldr x10, [sp, #40] // 8-byte Folded Reload
+ adcs x10, x10, x21
+ ldp x13, x11, [sp, #424]
+ ldp x14, x12, [sp, #80]
+ adcs x12, x12, x27
+ adcs x14, x14, x26
+ ldp x17, x15, [sp, #408]
+ ldp x18, x16, [sp, #64]
+ adcs x16, x16, x25
+ adcs x18, x18, x24
+ ldr x1, [sp, #56] // 8-byte Folded Reload
+ ldp x2, x4, [sp, #16]
+ adcs x1, x1, x2
+ ldr x2, [sp, #384]
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x4
+ ldp x4, x0, [sp, #392]
+ ldr x6, [sp, #32] // 8-byte Folded Reload
+ adcs x5, x5, x6
+ ldr x6, [sp, #48] // 8-byte Folded Reload
+ adcs x6, x6, xzr
+ adds x19, x10, x2
+ adcs x10, x12, x4
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x0
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ adcs x8, x6, x8
+ stp x8, x9, [sp, #96]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #48]
+ mul x1, x19, x22
+ add x8, sp, #304 // =304
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #368]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldp x22, x8, [sp, #352]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldp x25, x24, [sp, #336]
+ ldp x27, x26, [sp, #320]
+ ldp x28, x21, [sp, #304]
+ ldr x8, [sp, #120] // 8-byte Folded Reload
+ ldr x1, [x8, #56]
+ add x8, sp, #224 // =224
+ mov x0, x23
+ bl .LmulPv512x64
+ cmn x19, x28
+ ldp x9, x8, [sp, #280]
+ ldr x10, [sp, #40] // 8-byte Folded Reload
+ adcs x10, x10, x21
+ ldp x13, x11, [sp, #264]
+ ldp x14, x12, [sp, #80]
+ adcs x12, x12, x27
+ adcs x14, x14, x26
+ ldp x17, x15, [sp, #248]
+ ldp x18, x16, [sp, #64]
+ adcs x16, x16, x25
+ adcs x18, x18, x24
+ ldr x1, [sp, #56] // 8-byte Folded Reload
+ adcs x1, x1, x22
+ ldr x2, [sp, #224]
+ ldp x5, x3, [sp, #96]
+ ldp x4, x6, [sp, #24]
+ adcs x3, x3, x4
+ ldp x4, x0, [sp, #232]
+ adcs x5, x5, x6
+ ldr x6, [sp, #48] // 8-byte Folded Reload
+ adcs x6, x6, xzr
+ adds x19, x10, x2
+ adcs x21, x12, x4
+ adcs x22, x14, x0
+ adcs x23, x16, x17
+ adcs x24, x18, x15
+ adcs x25, x1, x13
+ adcs x10, x3, x11
+ str x10, [sp, #128] // 8-byte Folded Spill
+ adcs x27, x5, x9
+ adcs x28, x6, x8
+ adcs x26, xzr, xzr
+ ldr x8, [sp, #136] // 8-byte Folded Reload
+ mul x1, x19, x8
+ add x8, sp, #144 // =144
+ mov x0, x20
+ bl .LmulPv512x64
+ ldp x15, x8, [sp, #200]
+ ldp x9, x10, [sp, #144]
+ ldp x11, x12, [sp, #160]
+ cmn x19, x9
+ ldp x13, x9, [sp, #176]
+ adcs x10, x21, x10
+ ldr x14, [sp, #192]
+ adcs x11, x22, x11
+ adcs x12, x23, x12
+ adcs x13, x24, x13
+ adcs x9, x25, x9
+ ldp x16, x17, [x20, #48]
+ ldp x18, x0, [x20, #32]
+ ldp x1, x2, [x20, #16]
+ ldp x3, x4, [x20]
+ ldr x5, [sp, #128] // 8-byte Folded Reload
+ adcs x14, x5, x14
+ adcs x15, x27, x15
+ adcs x8, x28, x8
+ adcs x5, x26, xzr
+ subs x3, x10, x3
+ sbcs x4, x11, x4
+ sbcs x1, x12, x1
+ sbcs x2, x13, x2
+ sbcs x18, x9, x18
+ sbcs x0, x14, x0
+ sbcs x16, x15, x16
+ sbcs x17, x8, x17
+ sbcs x5, x5, xzr
+ tst x5, #0x1
+ csel x10, x10, x3, ne
+ csel x11, x11, x4, ne
+ csel x12, x12, x1, ne
+ csel x13, x13, x2, ne
+ csel x9, x9, x18, ne
+ csel x14, x14, x0, ne
+ csel x15, x15, x16, ne
+ csel x8, x8, x17, ne
+ ldr x16, [sp, #112] // 8-byte Folded Reload
+ stp x10, x11, [x16]
+ stp x12, x13, [x16, #16]
+ stp x9, x14, [x16, #32]
+ stp x15, x8, [x16, #48]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end114:
+ .size mcl_fp_mont8L, .Lfunc_end114-mcl_fp_mont8L
+
+ .globl mcl_fp_montNF8L
+ .align 2
+ .type mcl_fp_montNF8L,@function
+mcl_fp_montNF8L: // @mcl_fp_montNF8L
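+// Montgomery multiplication variant whose final correction selects the
+// result by the sign of the trial subtraction instead of a carry-out.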
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #1424 // =1424
+ mov x20, x3
+ mov x26, x2
+ str x26, [sp, #128] // 8-byte Folded Spill
+ ldur x19, [x20, #-8]
+ str x19, [sp, #136] // 8-byte Folded Spill
+ ldr x9, [x26]
+ mov x27, x1
+ stp x0, x27, [sp, #112]
+ sub x8, x29, #160 // =160
+ mov x0, x27
+ mov x1, x9
+ bl .LmulPv512x64
+ ldur x24, [x29, #-160]
+ ldur x8, [x29, #-96]
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldur x8, [x29, #-104]
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldur x8, [x29, #-112]
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldur x8, [x29, #-120]
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldur x8, [x29, #-128]
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldur x8, [x29, #-136]
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldur x8, [x29, #-144]
+ str x8, [sp, #56] // 8-byte Folded Spill
+ ldur x8, [x29, #-152]
+ str x8, [sp, #48] // 8-byte Folded Spill
+ mul x1, x24, x19
+ sub x8, x29, #240 // =240
+ mov x0, x20
+ bl .LmulPv512x64
+ ldur x8, [x29, #-176]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldur x8, [x29, #-184]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldur x8, [x29, #-192]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldp x19, x28, [x29, #-208]
+ ldp x21, x23, [x29, #-224]
+ ldp x25, x22, [x29, #-240]
+ ldr x1, [x26, #8]
+ add x8, sp, #1184 // =1184
+ mov x0, x27
+ bl .LmulPv512x64
+ cmn x25, x24
+ ldr x8, [sp, #1248]
+ ldr x9, [sp, #1240]
+ ldp x10, x12, [sp, #48]
+ adcs x10, x22, x10
+ ldr x11, [sp, #1232]
+ adcs x12, x21, x12
+ ldr x13, [sp, #1224]
+ ldp x14, x16, [sp, #64]
+ adcs x14, x23, x14
+ ldr x15, [sp, #1216]
+ adcs x16, x19, x16
+ ldr x17, [sp, #1208]
+ ldp x18, x1, [sp, #80]
+ adcs x18, x28, x18
+ ldr x0, [sp, #1192]
+ ldp x2, x4, [sp, #24]
+ adcs x1, x2, x1
+ ldr x2, [sp, #1184]
+ ldp x3, x5, [sp, #96]
+ adcs x3, x4, x3
+ ldr x4, [sp, #1200]
+ ldr x6, [sp, #40] // 8-byte Folded Reload
+ adcs x5, x6, x5
+ adds x19, x10, x2
+ adcs x10, x12, x0
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x4
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x27, [sp, #136] // 8-byte Folded Reload
+ mul x1, x19, x27
+ add x8, sp, #1104 // =1104
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #1168]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #1160]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #1152]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #1144]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x23, [sp, #1136]
+ ldr x24, [sp, #1128]
+ ldr x25, [sp, #1120]
+ ldr x21, [sp, #1112]
+ ldr x26, [sp, #1104]
+ ldp x22, x28, [sp, #120]
+ ldr x1, [x28, #16]
+ add x8, sp, #1024 // =1024
+ mov x0, x22
+ bl .LmulPv512x64
+ cmn x19, x26
+ ldr x8, [sp, #1088]
+ ldr x9, [sp, #1080]
+ ldp x10, x18, [sp, #48]
+ adcs x10, x10, x21
+ ldr x11, [sp, #1072]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x25
+ ldr x13, [sp, #1064]
+ adcs x14, x14, x24
+ ldr x15, [sp, #1056]
+ ldr x16, [sp, #64] // 8-byte Folded Reload
+ adcs x16, x16, x23
+ ldr x17, [sp, #1048]
+ ldp x0, x2, [sp, #16]
+ adcs x18, x18, x0
+ ldr x0, [sp, #1032]
+ ldp x3, x1, [sp, #96]
+ adcs x1, x1, x2
+ ldr x2, [sp, #1024]
+ ldp x4, x6, [sp, #32]
+ adcs x3, x3, x4
+ ldr x4, [sp, #1040]
+ ldr x5, [sp, #88] // 8-byte Folded Reload
+ adcs x5, x5, x6
+ adds x19, x10, x2
+ adcs x10, x12, x0
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x4
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ mul x1, x19, x27
+ add x8, sp, #944 // =944
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #1008]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #1000]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #992]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #984]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x23, [sp, #976]
+ ldr x24, [sp, #968]
+ ldr x25, [sp, #960]
+ ldr x21, [sp, #952]
+ ldr x26, [sp, #944]
+ ldr x1, [x28, #24]
+ add x8, sp, #864 // =864
+ mov x27, x22
+ mov x0, x27
+ bl .LmulPv512x64
+ cmn x19, x26
+ ldr x8, [sp, #928]
+ ldr x9, [sp, #920]
+ ldp x10, x18, [sp, #48]
+ adcs x10, x10, x21
+ ldr x11, [sp, #912]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x25
+ ldr x13, [sp, #904]
+ adcs x14, x14, x24
+ ldr x15, [sp, #896]
+ ldr x16, [sp, #64] // 8-byte Folded Reload
+ adcs x16, x16, x23
+ ldr x17, [sp, #888]
+ ldp x0, x2, [sp, #16]
+ adcs x18, x18, x0
+ ldr x0, [sp, #872]
+ ldp x3, x1, [sp, #96]
+ adcs x1, x1, x2
+ ldr x2, [sp, #864]
+ ldp x4, x6, [sp, #32]
+ adcs x3, x3, x4
+ ldr x4, [sp, #880]
+ ldr x5, [sp, #88] // 8-byte Folded Reload
+ adcs x5, x5, x6
+ adds x19, x10, x2
+ adcs x10, x12, x0
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x4
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x28, [sp, #136] // 8-byte Folded Reload
+ mul x1, x19, x28
+ add x8, sp, #784 // =784
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #848]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #840]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #832]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #824]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x23, [sp, #816]
+ ldr x24, [sp, #808]
+ ldr x25, [sp, #800]
+ ldr x21, [sp, #792]
+ ldr x26, [sp, #784]
+ ldr x22, [sp, #128] // 8-byte Folded Reload
+ ldr x1, [x22, #32]
+ add x8, sp, #704 // =704
+ mov x0, x27
+ bl .LmulPv512x64
+ cmn x19, x26
+ ldr x8, [sp, #768]
+ ldr x9, [sp, #760]
+ ldp x10, x18, [sp, #48]
+ adcs x10, x10, x21
+ ldr x11, [sp, #752]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x25
+ ldr x13, [sp, #744]
+ adcs x14, x14, x24
+ ldr x15, [sp, #736]
+ ldr x16, [sp, #64] // 8-byte Folded Reload
+ adcs x16, x16, x23
+ ldr x17, [sp, #728]
+ ldp x0, x2, [sp, #16]
+ adcs x18, x18, x0
+ ldr x0, [sp, #712]
+ ldp x3, x1, [sp, #96]
+ adcs x1, x1, x2
+ ldr x2, [sp, #704]
+ ldp x4, x6, [sp, #32]
+ adcs x3, x3, x4
+ ldr x4, [sp, #720]
+ ldr x5, [sp, #88] // 8-byte Folded Reload
+ adcs x5, x5, x6
+ adds x19, x10, x2
+ adcs x10, x12, x0
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x4
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ mul x1, x19, x28
+ add x8, sp, #624 // =624
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #688]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #680]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #672]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #664]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x23, [sp, #656]
+ ldr x24, [sp, #648]
+ ldr x25, [sp, #640]
+ ldr x21, [sp, #632]
+ ldr x26, [sp, #624]
+ mov x27, x22
+ ldr x1, [x27, #40]
+ add x8, sp, #544 // =544
+ ldr x28, [sp, #120] // 8-byte Folded Reload
+ mov x0, x28
+ bl .LmulPv512x64
+ cmn x19, x26
+ ldr x8, [sp, #608]
+ ldr x9, [sp, #600]
+ ldp x10, x18, [sp, #48]
+ adcs x10, x10, x21
+ ldr x11, [sp, #592]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x25
+ ldr x13, [sp, #584]
+ adcs x14, x14, x24
+ ldr x15, [sp, #576]
+ ldr x16, [sp, #64] // 8-byte Folded Reload
+ adcs x16, x16, x23
+ ldr x17, [sp, #568]
+ ldp x0, x2, [sp, #16]
+ adcs x18, x18, x0
+ ldr x0, [sp, #552]
+ ldp x3, x1, [sp, #96]
+ adcs x1, x1, x2
+ ldr x2, [sp, #544]
+ ldp x4, x6, [sp, #32]
+ adcs x3, x3, x4
+ ldr x4, [sp, #560]
+ ldr x5, [sp, #88] // 8-byte Folded Reload
+ adcs x5, x5, x6
+ adds x19, x10, x2
+ adcs x10, x12, x0
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x4
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x22, [sp, #136] // 8-byte Folded Reload
+ mul x1, x19, x22
+ add x8, sp, #464 // =464
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #528]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #520]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #512]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldp x23, x8, [sp, #496]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldp x25, x24, [sp, #480]
+ ldp x26, x21, [sp, #464]
+ ldr x1, [x27, #48]
+ add x8, sp, #384 // =384
+ mov x0, x28
+ bl .LmulPv512x64
+ cmn x19, x26
+ ldp x9, x8, [sp, #440]
+ ldp x10, x18, [sp, #48]
+ adcs x10, x10, x21
+ ldp x13, x11, [sp, #424]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x25
+ adcs x14, x14, x24
+ ldp x17, x15, [sp, #408]
+ ldr x16, [sp, #64] // 8-byte Folded Reload
+ adcs x16, x16, x23
+ ldp x0, x2, [sp, #16]
+ adcs x18, x18, x0
+ ldp x3, x1, [sp, #96]
+ adcs x1, x1, x2
+ ldp x2, x0, [sp, #384]
+ ldp x4, x6, [sp, #32]
+ adcs x3, x3, x4
+ ldr x4, [sp, #400]
+ ldr x5, [sp, #88] // 8-byte Folded Reload
+ adcs x5, x5, x6
+ adds x19, x10, x2
+ adcs x10, x12, x0
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x4
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x17
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x15
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x13
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x11
+ adcs x9, x5, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ mul x1, x19, x22
+ add x8, sp, #304 // =304
+ mov x0, x20
+ bl .LmulPv512x64
+ ldp x27, x8, [sp, #360]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldp x22, x28, [sp, #344]
+ ldp x24, x23, [sp, #328]
+ ldp x21, x25, [sp, #312]
+ ldr x26, [sp, #304]
+ ldp x0, x8, [sp, #120]
+ ldr x1, [x8, #56]
+ add x8, sp, #224 // =224
+ bl .LmulPv512x64
+ cmn x19, x26
+ ldp x9, x8, [sp, #280]
+ ldp x10, x18, [sp, #48]
+ adcs x10, x10, x21
+ ldp x13, x11, [sp, #264]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x25
+ adcs x14, x14, x24
+ ldp x17, x15, [sp, #248]
+ ldr x16, [sp, #64] // 8-byte Folded Reload
+ adcs x16, x16, x23
+ adcs x18, x18, x22
+ ldp x2, x0, [sp, #224]
+ ldp x3, x1, [sp, #96]
+ adcs x1, x1, x28
+ adcs x3, x3, x27
+ ldr x4, [sp, #240]
+ ldr x5, [sp, #88] // 8-byte Folded Reload
+ ldr x6, [sp, #40] // 8-byte Folded Reload
+ adcs x5, x5, x6
+ adds x19, x10, x2
+ adcs x21, x12, x0
+ adcs x22, x14, x4
+ adcs x23, x16, x17
+ adcs x24, x18, x15
+ adcs x25, x1, x13
+ adcs x26, x3, x11
+ adcs x27, x5, x9
+ adcs x28, x8, xzr
+ ldr x8, [sp, #136] // 8-byte Folded Reload
+ mul x1, x19, x8
+ add x8, sp, #144 // =144
+ mov x0, x20
+ bl .LmulPv512x64
+ ldp x15, x8, [sp, #200]
+ ldp x9, x10, [sp, #144]
+ ldp x11, x12, [sp, #160]
+ cmn x19, x9
+ ldp x13, x9, [sp, #176]
+ adcs x10, x21, x10
+ ldr x14, [sp, #192]
+ adcs x11, x22, x11
+ adcs x12, x23, x12
+ adcs x13, x24, x13
+ adcs x9, x25, x9
+ ldp x16, x17, [x20, #48]
+ ldp x18, x0, [x20, #32]
+ ldp x1, x2, [x20, #16]
+ ldp x3, x4, [x20]
+ adcs x14, x26, x14
+ adcs x15, x27, x15
+ adcs x8, x28, x8
+ subs x3, x10, x3
+ sbcs x4, x11, x4
+ sbcs x1, x12, x1
+ sbcs x2, x13, x2
+ sbcs x18, x9, x18
+ sbcs x0, x14, x0
+ sbcs x16, x15, x16
+ sbcs x17, x8, x17
+ cmp x17, #0 // =0
+ csel x10, x10, x3, lt
+ csel x11, x11, x4, lt
+ csel x12, x12, x1, lt
+ csel x13, x13, x2, lt
+ csel x9, x9, x18, lt
+ csel x14, x14, x0, lt
+ csel x15, x15, x16, lt
+ csel x8, x8, x17, lt
+ ldr x16, [sp, #112] // 8-byte Folded Reload
+ stp x10, x11, [x16]
+ stp x12, x13, [x16, #16]
+ stp x9, x14, [x16, #32]
+ stp x15, x8, [x16, #48]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end115:
+ .size mcl_fp_montNF8L, .Lfunc_end115-mcl_fp_montNF8L
+
+ .globl mcl_fp_montRed8L
+ .align 2
+ .type mcl_fp_montRed8L,@function
+mcl_fp_montRed8L: // @mcl_fp_montRed8L
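+// Montgomery reduction: folds the sixteen-limb input at x1 down to eight
+// limbs modulo p (x2), one limb per round, using the 64-bit constant stored
+// eight bytes before the modulus; the reduced value is written to x0.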
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #800 // =800
+ mov x20, x2
+ ldur x9, [x20, #-8]
+ str x9, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [x20, #48]
+ str x8, [sp, #144] // 8-byte Folded Spill
+ ldr x8, [x20, #56]
+ str x8, [sp, #152] // 8-byte Folded Spill
+ ldr x8, [x20, #32]
+ str x8, [sp, #120] // 8-byte Folded Spill
+ ldr x8, [x20, #40]
+ str x8, [sp, #128] // 8-byte Folded Spill
+ ldr x8, [x20, #16]
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldr x8, [x20, #24]
+ str x8, [sp, #112] // 8-byte Folded Spill
+ ldr x8, [x20]
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [x20, #8]
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldr x8, [x1, #112]
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldr x8, [x1, #120]
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [x1, #96]
+ str x8, [sp, #56] // 8-byte Folded Spill
+ ldr x8, [x1, #104]
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldr x8, [x1, #80]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [x1, #88]
+ str x8, [sp, #48] // 8-byte Folded Spill
+ ldp x28, x8, [x1, #64]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldp x22, x25, [x1, #48]
+ ldp x24, x19, [x1, #32]
+ ldp x27, x26, [x1, #16]
+ ldp x21, x23, [x1]
+ str x0, [sp, #136] // 8-byte Folded Spill
+ mul x1, x21, x9
+ sub x8, x29, #160 // =160
+ mov x0, x20
+ bl .LmulPv512x64
+ ldp x9, x8, [x29, #-104]
+ ldp x11, x10, [x29, #-120]
+ ldp x16, x12, [x29, #-136]
+ ldp x13, x14, [x29, #-160]
+ ldur x15, [x29, #-144]
+ cmn x21, x13
+ adcs x21, x23, x14
+ adcs x13, x27, x15
+ adcs x26, x26, x16
+ adcs x24, x24, x12
+ adcs x11, x19, x11
+ stp x11, x13, [sp, #8]
+ adcs x22, x22, x10
+ adcs x25, x25, x9
+ adcs x27, x28, x8
+ ldr x8, [sp, #24] // 8-byte Folded Reload
+ adcs x28, x8, xzr
+ ldp x19, x8, [sp, #32]
+ adcs x23, x8, xzr
+ ldr x8, [sp, #48] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #48] // 8-byte Folded Spill
+ ldr x8, [sp, #56] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #56] // 8-byte Folded Spill
+ ldr x8, [sp, #64] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ adcs x8, xzr, xzr
+ str x8, [sp, #40] // 8-byte Folded Spill
+ mul x1, x21, x19
+ sub x8, x29, #240 // =240
+ mov x0, x20
+ bl .LmulPv512x64
+ ldp x9, x8, [x29, #-184]
+ ldp x11, x10, [x29, #-200]
+ ldp x16, x12, [x29, #-216]
+ ldp x13, x14, [x29, #-240]
+ ldur x15, [x29, #-224]
+ cmn x21, x13
+ ldr x13, [sp, #16] // 8-byte Folded Reload
+ adcs x21, x13, x14
+ adcs x13, x26, x15
+ str x13, [sp, #24] // 8-byte Folded Spill
+ adcs x24, x24, x16
+ ldr x13, [sp, #8] // 8-byte Folded Reload
+ adcs x12, x13, x12
+ str x12, [sp, #16] // 8-byte Folded Spill
+ adcs x22, x22, x11
+ adcs x25, x25, x10
+ adcs x27, x27, x9
+ adcs x28, x28, x8
+ adcs x23, x23, xzr
+ ldr x8, [sp, #48] // 8-byte Folded Reload
+ adcs x26, x8, xzr
+ ldr x8, [sp, #56] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #56] // 8-byte Folded Spill
+ ldr x8, [sp, #64] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [sp, #40] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #48] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #560 // =560
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #624]
+ ldr x9, [sp, #616]
+ ldr x10, [sp, #608]
+ ldr x11, [sp, #600]
+ ldr x12, [sp, #592]
+ ldr x13, [sp, #560]
+ ldr x14, [sp, #568]
+ ldr x15, [sp, #576]
+ ldr x16, [sp, #584]
+ cmn x21, x13
+ ldr x13, [sp, #24] // 8-byte Folded Reload
+ adcs x21, x13, x14
+ adcs x13, x24, x15
+ str x13, [sp, #40] // 8-byte Folded Spill
+ ldr x13, [sp, #16] // 8-byte Folded Reload
+ adcs x13, x13, x16
+ str x13, [sp, #24] // 8-byte Folded Spill
+ adcs x22, x22, x12
+ adcs x25, x25, x11
+ adcs x27, x27, x10
+ adcs x28, x28, x9
+ adcs x23, x23, x8
+ adcs x26, x26, xzr
+ ldr x8, [sp, #56] // 8-byte Folded Reload
+ adcs x24, x8, xzr
+ ldr x8, [sp, #64] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [sp, #48] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #56] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #480 // =480
+ mov x0, x20
+ bl .LmulPv512x64
+ ldr x8, [sp, #544]
+ ldr x9, [sp, #536]
+ ldr x10, [sp, #528]
+ ldr x11, [sp, #520]
+ ldr x12, [sp, #512]
+ ldp x13, x14, [sp, #480]
+ ldp x15, x16, [sp, #496]
+ cmn x21, x13
+ ldr x13, [sp, #40] // 8-byte Folded Reload
+ adcs x21, x13, x14
+ ldr x13, [sp, #24] // 8-byte Folded Reload
+ adcs x13, x13, x15
+ adcs x22, x22, x16
+ adcs x25, x25, x12
+ adcs x27, x27, x11
+ adcs x28, x28, x10
+ adcs x23, x23, x9
+ adcs x26, x26, x8
+ adcs x24, x24, xzr
+ ldr x8, [sp, #64] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [sp, #56] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ stp x13, x8, [sp, #48]
+ mul x1, x21, x19
+ add x8, sp, #400 // =400
+ mov x0, x20
+ bl .LmulPv512x64
+ ldp x9, x8, [sp, #456]
+ ldp x11, x10, [sp, #440]
+ ldp x16, x12, [sp, #424]
+ ldp x13, x14, [sp, #400]
+ ldr x15, [sp, #416]
+ cmn x21, x13
+ ldr x13, [sp, #48] // 8-byte Folded Reload
+ adcs x21, x13, x14
+ adcs x13, x22, x15
+ str x13, [sp, #48] // 8-byte Folded Spill
+ adcs x25, x25, x16
+ adcs x27, x27, x12
+ adcs x28, x28, x11
+ adcs x23, x23, x10
+ adcs x26, x26, x9
+ adcs x24, x24, x8
+ ldr x8, [sp, #64] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x22, x8, xzr
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [sp, #56] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #72] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #320 // =320
+ mov x0, x20
+ bl .LmulPv512x64
+ ldp x9, x8, [sp, #376]
+ ldp x11, x10, [sp, #360]
+ ldp x16, x12, [sp, #344]
+ ldp x13, x14, [sp, #320]
+ ldr x15, [sp, #336]
+ cmn x21, x13
+ ldr x13, [sp, #48] // 8-byte Folded Reload
+ adcs x21, x13, x14
+ adcs x13, x25, x15
+ adcs x27, x27, x16
+ adcs x28, x28, x12
+ adcs x23, x23, x11
+ adcs x26, x26, x10
+ adcs x24, x24, x9
+ ldr x9, [sp, #64] // 8-byte Folded Reload
+ adcs x8, x9, x8
+ stp x13, x8, [sp, #56]
+ adcs x22, x22, xzr
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x25, x8, xzr
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #240 // =240
+ mov x0, x20
+ bl .LmulPv512x64
+ ldp x9, x8, [sp, #296]
+ ldp x11, x10, [sp, #280]
+ ldp x16, x12, [sp, #264]
+ ldp x13, x14, [sp, #240]
+ ldr x15, [sp, #256]
+ cmn x21, x13
+ ldr x13, [sp, #56] // 8-byte Folded Reload
+ adcs x21, x13, x14
+ adcs x13, x27, x15
+ adcs x28, x28, x16
+ adcs x23, x23, x12
+ adcs x26, x26, x11
+ adcs x24, x24, x10
+ ldr x10, [sp, #64] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ stp x9, x13, [sp, #64]
+ adcs x22, x22, x8
+ adcs x25, x25, xzr
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x27, x8, xzr
+ mul x1, x21, x19
+ add x8, sp, #160 // =160
+ mov x0, x20
+ bl .LmulPv512x64
+ ldp x9, x8, [sp, #216]
+ ldp x11, x10, [sp, #200]
+ ldp x16, x12, [sp, #184]
+ ldp x13, x14, [sp, #160]
+ ldr x15, [sp, #176]
+ cmn x21, x13
+ ldr x13, [sp, #72] // 8-byte Folded Reload
+ adcs x13, x13, x14
+ adcs x14, x28, x15
+ adcs x15, x23, x16
+ adcs x12, x26, x12
+ adcs x11, x24, x11
+ ldr x16, [sp, #64] // 8-byte Folded Reload
+ adcs x10, x16, x10
+ adcs x9, x22, x9
+ adcs x8, x25, x8
+ adcs x16, x27, xzr
+ ldp x17, x18, [sp, #88]
+ subs x17, x13, x17
+ sbcs x18, x14, x18
+ ldp x0, x1, [sp, #104]
+ sbcs x0, x15, x0
+ sbcs x1, x12, x1
+ ldp x2, x3, [sp, #120]
+ sbcs x2, x11, x2
+ sbcs x3, x10, x3
+ ldp x4, x5, [sp, #144]
+ sbcs x4, x9, x4
+ sbcs x5, x8, x5
+ sbcs x16, x16, xzr
+ tst x16, #0x1
+ csel x13, x13, x17, ne
+ csel x14, x14, x18, ne
+ csel x15, x15, x0, ne
+ csel x12, x12, x1, ne
+ csel x11, x11, x2, ne
+ csel x10, x10, x3, ne
+ csel x9, x9, x4, ne
+ csel x8, x8, x5, ne
+ ldr x16, [sp, #136] // 8-byte Folded Reload
+ stp x13, x14, [x16]
+ stp x15, x12, [x16, #16]
+ stp x11, x10, [x16, #32]
+ stp x9, x8, [x16, #48]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end116:
+ .size mcl_fp_montRed8L, .Lfunc_end116-mcl_fp_montRed8L
+
+ .globl mcl_fp_addPre8L
+ .align 2
+ .type mcl_fp_addPre8L,@function
+mcl_fp_addPre8L: // @mcl_fp_addPre8L
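+// Eight-limb (512-bit) counterpart of mcl_fp_addPre7L: z = x + y with the
+// carry returned in x0.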
+// BB#0:
+ ldp x8, x9, [x2, #48]
+ ldp x10, x11, [x1, #48]
+ ldp x12, x13, [x2, #32]
+ ldp x14, x15, [x1, #32]
+ ldp x16, x17, [x2, #16]
+ ldp x18, x2, [x2]
+ ldp x3, x4, [x1]
+ ldp x5, x1, [x1, #16]
+ adds x18, x18, x3
+ str x18, [x0]
+ adcs x18, x2, x4
+ adcs x16, x16, x5
+ stp x18, x16, [x0, #8]
+ adcs x16, x17, x1
+ adcs x12, x12, x14
+ stp x16, x12, [x0, #24]
+ adcs x12, x13, x15
+ adcs x8, x8, x10
+ stp x12, x8, [x0, #40]
+ adcs x9, x9, x11
+ adcs x8, xzr, xzr
+ str x9, [x0, #56]
+ mov x0, x8
+ ret
+.Lfunc_end117:
+ .size mcl_fp_addPre8L, .Lfunc_end117-mcl_fp_addPre8L
+
+ .globl mcl_fp_subPre8L
+ .align 2
+ .type mcl_fp_subPre8L,@function
+mcl_fp_subPre8L: // @mcl_fp_subPre8L
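+// Eight-limb counterpart of mcl_fp_subPre7L: z = x - y with the borrow
+// returned in x0.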
+// BB#0:
+ ldp x8, x9, [x2, #48]
+ ldp x10, x11, [x1, #48]
+ ldp x12, x13, [x2, #32]
+ ldp x14, x15, [x1, #32]
+ ldp x16, x17, [x2, #16]
+ ldp x18, x2, [x2]
+ ldp x3, x4, [x1]
+ ldp x5, x1, [x1, #16]
+ subs x18, x3, x18
+ str x18, [x0]
+ sbcs x18, x4, x2
+ sbcs x16, x5, x16
+ stp x18, x16, [x0, #8]
+ sbcs x16, x1, x17
+ sbcs x12, x14, x12
+ stp x16, x12, [x0, #24]
+ sbcs x12, x15, x13
+ sbcs x8, x10, x8
+ stp x12, x8, [x0, #40]
+ sbcs x9, x11, x9
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ str x9, [x0, #56]
+ mov x0, x8
+ ret
+.Lfunc_end118:
+ .size mcl_fp_subPre8L, .Lfunc_end118-mcl_fp_subPre8L
+
+ .globl mcl_fp_shr1_8L
+ .align 2
+ .type mcl_fp_shr1_8L,@function
+mcl_fp_shr1_8L: // @mcl_fp_shr1_8L
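+// 512-bit logical right shift by one bit.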
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x10, x11, [x1, #48]
+ ldp x12, x13, [x1, #16]
+ ldp x14, x15, [x1, #32]
+ extr x8, x9, x8, #1
+ extr x9, x12, x9, #1
+ extr x12, x13, x12, #1
+ extr x13, x14, x13, #1
+ extr x14, x15, x14, #1
+ extr x15, x10, x15, #1
+ extr x10, x11, x10, #1
+ lsr x11, x11, #1
+ stp x8, x9, [x0]
+ stp x12, x13, [x0, #16]
+ stp x14, x15, [x0, #32]
+ stp x10, x11, [x0, #48]
+ ret
+.Lfunc_end119:
+ .size mcl_fp_shr1_8L, .Lfunc_end119-mcl_fp_shr1_8L
+
+ .globl mcl_fp_add8L
+ .align 2
+ .type mcl_fp_add8L,@function
+mcl_fp_add8L: // @mcl_fp_add8L
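+// Eight-limb modular addition with the same conditional-subtraction scheme
+// as mcl_fp_add7L.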
+// BB#0:
+ stp x22, x21, [sp, #-32]!
+ stp x20, x19, [sp, #16]
+ ldp x8, x9, [x2, #48]
+ ldp x10, x11, [x1, #48]
+ ldp x12, x13, [x2, #32]
+ ldp x14, x15, [x1, #32]
+ ldp x16, x17, [x2, #16]
+ ldp x18, x2, [x2]
+ ldp x4, x5, [x1]
+ ldp x6, x1, [x1, #16]
+ adds x18, x18, x4
+ adcs x2, x2, x5
+ ldp x4, x5, [x3, #48]
+ adcs x16, x16, x6
+ adcs x17, x17, x1
+ ldp x1, x6, [x3, #32]
+ adcs x7, x12, x14
+ adcs x19, x13, x15
+ ldp x12, x13, [x3]
+ stp x18, x2, [x0]
+ stp x16, x17, [x0, #16]
+ stp x7, x19, [x0, #32]
+ adcs x8, x8, x10
+ adcs x20, x9, x11
+ stp x8, x20, [x0, #48]
+ adcs x21, xzr, xzr
+ ldp x9, x10, [x3, #16]
+ subs x15, x18, x12
+ sbcs x14, x2, x13
+ sbcs x13, x16, x9
+ sbcs x12, x17, x10
+ sbcs x11, x7, x1
+ sbcs x10, x19, x6
+ sbcs x9, x8, x4
+ sbcs x8, x20, x5
+ sbcs x16, x21, xzr
+ and w16, w16, #0x1
+ tbnz w16, #0, .LBB120_2
+// BB#1: // %nocarry
+ stp x15, x14, [x0]
+ stp x13, x12, [x0, #16]
+ stp x11, x10, [x0, #32]
+ stp x9, x8, [x0, #48]
+.LBB120_2: // %carry
+ ldp x20, x19, [sp, #16]
+ ldp x22, x21, [sp], #32
+ ret
+.Lfunc_end120:
+ .size mcl_fp_add8L, .Lfunc_end120-mcl_fp_add8L
+
+ .globl mcl_fp_addNF8L
+ .align 2
+ .type mcl_fp_addNF8L,@function
+mcl_fp_addNF8L: // @mcl_fp_addNF8L
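+// Eight-limb counterpart of mcl_fp_addNF7L (reduction chosen by sign).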
+// BB#0:
+ ldp x8, x9, [x1, #48]
+ ldp x10, x11, [x2, #48]
+ ldp x12, x13, [x1, #32]
+ ldp x14, x15, [x2, #32]
+ ldp x16, x17, [x1, #16]
+ ldp x18, x1, [x1]
+ ldp x4, x5, [x2]
+ ldp x6, x2, [x2, #16]
+ adds x18, x4, x18
+ adcs x1, x5, x1
+ ldp x4, x5, [x3, #48]
+ adcs x16, x6, x16
+ adcs x17, x2, x17
+ ldp x2, x6, [x3, #32]
+ adcs x12, x14, x12
+ adcs x13, x15, x13
+ ldp x14, x15, [x3]
+ adcs x8, x10, x8
+ ldp x10, x3, [x3, #16]
+ adcs x9, x11, x9
+ subs x11, x18, x14
+ sbcs x14, x1, x15
+ sbcs x10, x16, x10
+ sbcs x15, x17, x3
+ sbcs x2, x12, x2
+ sbcs x3, x13, x6
+ sbcs x4, x8, x4
+ sbcs x5, x9, x5
+ cmp x5, #0 // =0
+ csel x11, x18, x11, lt
+ csel x14, x1, x14, lt
+ csel x10, x16, x10, lt
+ csel x15, x17, x15, lt
+ csel x12, x12, x2, lt
+ csel x13, x13, x3, lt
+ csel x8, x8, x4, lt
+ csel x9, x9, x5, lt
+ stp x11, x14, [x0]
+ stp x10, x15, [x0, #16]
+ stp x12, x13, [x0, #32]
+ stp x8, x9, [x0, #48]
+ ret
+.Lfunc_end121:
+ .size mcl_fp_addNF8L, .Lfunc_end121-mcl_fp_addNF8L
+
+ .globl mcl_fp_sub8L
+ .align 2
+ .type mcl_fp_sub8L,@function
+mcl_fp_sub8L: // @mcl_fp_sub8L
+// BB#0:
+ ldp x14, x15, [x2, #48]
+ ldp x16, x17, [x1, #48]
+ ldp x12, x13, [x2, #32]
+ ldp x18, x4, [x1, #32]
+ ldp x10, x11, [x2, #16]
+ ldp x8, x9, [x2]
+ ldp x2, x5, [x1]
+ ldp x6, x1, [x1, #16]
+ subs x8, x2, x8
+ sbcs x9, x5, x9
+ stp x8, x9, [x0]
+ sbcs x10, x6, x10
+ sbcs x11, x1, x11
+ stp x10, x11, [x0, #16]
+ sbcs x12, x18, x12
+ sbcs x13, x4, x13
+ stp x12, x13, [x0, #32]
+ sbcs x14, x16, x14
+ sbcs x15, x17, x15
+ stp x14, x15, [x0, #48]
+ ngcs x16, xzr
+ and w16, w16, #0x1
+ tbnz w16, #0, .LBB122_2
+// BB#1: // %nocarry
+ ret
+.LBB122_2: // %carry
+ ldp x16, x17, [x3, #48]
+ ldp x18, x1, [x3]
+ ldp x2, x4, [x3, #16]
+ ldp x5, x3, [x3, #32]
+ adds x8, x18, x8
+ adcs x9, x1, x9
+ adcs x10, x2, x10
+ adcs x11, x4, x11
+ adcs x12, x5, x12
+ adcs x13, x3, x13
+ adcs x14, x16, x14
+ adcs x15, x17, x15
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ stp x12, x13, [x0, #32]
+ stp x14, x15, [x0, #48]
+ ret
+.Lfunc_end122:
+ .size mcl_fp_sub8L, .Lfunc_end122-mcl_fp_sub8L
+
+ .globl mcl_fp_subNF8L
+ .align 2
+ .type mcl_fp_subNF8L,@function
+mcl_fp_subNF8L: // @mcl_fp_subNF8L
+// BB#0:
+ ldp x8, x9, [x2, #48]
+ ldp x10, x11, [x1, #48]
+ ldp x12, x13, [x2, #32]
+ ldp x14, x15, [x1, #32]
+ ldp x16, x17, [x2, #16]
+ ldp x18, x2, [x2]
+ ldp x4, x5, [x1]
+ ldp x6, x1, [x1, #16]
+ subs x18, x4, x18
+ sbcs x2, x5, x2
+ ldp x4, x5, [x3, #48]
+ sbcs x16, x6, x16
+ sbcs x17, x1, x17
+ ldp x1, x6, [x3, #32]
+ sbcs x12, x14, x12
+ sbcs x13, x15, x13
+ ldp x14, x15, [x3, #16]
+ sbcs x8, x10, x8
+ ldp x10, x3, [x3]
+ sbcs x9, x11, x9
+ asr x11, x9, #63
+ and x10, x11, x10
+ and x3, x11, x3
+ and x14, x11, x14
+ and x15, x11, x15
+ and x1, x11, x1
+ and x6, x11, x6
+ and x4, x11, x4
+ and x11, x11, x5
+ adds x10, x10, x18
+ str x10, [x0]
+ adcs x10, x3, x2
+ str x10, [x0, #8]
+ adcs x10, x14, x16
+ str x10, [x0, #16]
+ adcs x10, x15, x17
+ str x10, [x0, #24]
+ adcs x10, x1, x12
+ str x10, [x0, #32]
+ adcs x10, x6, x13
+ adcs x8, x4, x8
+ stp x10, x8, [x0, #40]
+ adcs x8, x11, x9
+ str x8, [x0, #56]
+ ret
+.Lfunc_end123:
+ .size mcl_fp_subNF8L, .Lfunc_end123-mcl_fp_subNF8L
+
+ .globl mcl_fpDbl_add8L
+ .align 2
+ .type mcl_fpDbl_add8L,@function
+mcl_fpDbl_add8L: // @mcl_fpDbl_add8L
+// BB#0:
+ ldp x8, x9, [x2, #112]
+ ldp x10, x11, [x1, #112]
+ ldp x12, x13, [x2, #96]
+ ldp x14, x15, [x1, #96]
+ ldp x16, x5, [x2]
+ ldp x17, x6, [x1]
+ ldp x18, x4, [x2, #80]
+ adds x16, x16, x17
+ ldr x17, [x1, #16]
+ str x16, [x0]
+ adcs x16, x5, x6
+ ldp x5, x6, [x2, #16]
+ str x16, [x0, #8]
+ adcs x17, x5, x17
+ ldp x16, x5, [x1, #24]
+ str x17, [x0, #16]
+ adcs x16, x6, x16
+ ldp x17, x6, [x2, #32]
+ str x16, [x0, #24]
+ adcs x17, x17, x5
+ ldp x16, x5, [x1, #40]
+ str x17, [x0, #32]
+ adcs x16, x6, x16
+ ldp x17, x6, [x2, #48]
+ str x16, [x0, #40]
+ ldr x16, [x1, #56]
+ adcs x17, x17, x5
+ ldp x5, x2, [x2, #64]
+ str x17, [x0, #48]
+ adcs x16, x6, x16
+ ldp x17, x6, [x1, #64]
+ str x16, [x0, #56]
+ ldp x16, x1, [x1, #80]
+ adcs x17, x5, x17
+ adcs x2, x2, x6
+ ldp x5, x6, [x3, #48]
+ adcs x16, x18, x16
+ adcs x18, x4, x1
+ ldp x1, x4, [x3, #32]
+ adcs x12, x12, x14
+ adcs x13, x13, x15
+ ldp x14, x15, [x3, #16]
+ adcs x8, x8, x10
+ ldp x10, x3, [x3]
+ adcs x9, x9, x11
+ adcs x11, xzr, xzr
+ subs x10, x17, x10
+ sbcs x3, x2, x3
+ sbcs x14, x16, x14
+ sbcs x15, x18, x15
+ sbcs x1, x12, x1
+ sbcs x4, x13, x4
+ sbcs x5, x8, x5
+ sbcs x6, x9, x6
+ sbcs x11, x11, xzr
+ tst x11, #0x1
+ csel x10, x17, x10, ne
+ csel x11, x2, x3, ne
+ csel x14, x16, x14, ne
+ csel x15, x18, x15, ne
+ csel x12, x12, x1, ne
+ csel x13, x13, x4, ne
+ csel x8, x8, x5, ne
+ csel x9, x9, x6, ne
+ stp x10, x11, [x0, #64]
+ stp x14, x15, [x0, #80]
+ stp x12, x13, [x0, #96]
+ stp x8, x9, [x0, #112]
+ ret
+.Lfunc_end124:
+ .size mcl_fpDbl_add8L, .Lfunc_end124-mcl_fpDbl_add8L
+
+ .globl mcl_fpDbl_sub8L
+ .align 2
+ .type mcl_fpDbl_sub8L,@function
+mcl_fpDbl_sub8L: // @mcl_fpDbl_sub8L
+// BB#0:
+ ldp x10, x8, [x2, #112]
+ ldp x11, x9, [x1, #112]
+ ldp x12, x13, [x2, #96]
+ ldp x14, x15, [x1, #96]
+ ldp x16, x5, [x1]
+ ldp x17, x4, [x2]
+ ldr x18, [x1, #80]
+ subs x16, x16, x17
+ ldr x17, [x1, #16]
+ str x16, [x0]
+ sbcs x16, x5, x4
+ ldp x4, x5, [x2, #16]
+ str x16, [x0, #8]
+ sbcs x17, x17, x4
+ ldp x16, x4, [x1, #24]
+ str x17, [x0, #16]
+ sbcs x16, x16, x5
+ ldp x17, x5, [x2, #32]
+ str x16, [x0, #24]
+ sbcs x17, x4, x17
+ ldp x16, x4, [x1, #40]
+ str x17, [x0, #32]
+ sbcs x16, x16, x5
+ ldp x17, x5, [x2, #48]
+ str x16, [x0, #40]
+ sbcs x17, x4, x17
+ ldp x16, x4, [x1, #56]
+ str x17, [x0, #48]
+ sbcs x16, x16, x5
+ ldp x17, x5, [x2, #64]
+ str x16, [x0, #56]
+ ldr x16, [x1, #72]
+ sbcs x17, x4, x17
+ ldp x4, x2, [x2, #80]
+ ldr x1, [x1, #88]
+ sbcs x16, x16, x5
+ sbcs x18, x18, x4
+ ldp x4, x5, [x3, #48]
+ sbcs x1, x1, x2
+ sbcs x12, x14, x12
+ ldp x14, x2, [x3, #32]
+ sbcs x13, x15, x13
+ sbcs x10, x11, x10
+ ldp x11, x15, [x3, #16]
+ sbcs x8, x9, x8
+ ngcs x9, xzr
+ tst x9, #0x1
+ ldp x9, x3, [x3]
+ csel x5, x5, xzr, ne
+ csel x4, x4, xzr, ne
+ csel x2, x2, xzr, ne
+ csel x14, x14, xzr, ne
+ csel x15, x15, xzr, ne
+ csel x11, x11, xzr, ne
+ csel x3, x3, xzr, ne
+ csel x9, x9, xzr, ne
+ adds x9, x9, x17
+ str x9, [x0, #64]
+ adcs x9, x3, x16
+ str x9, [x0, #72]
+ adcs x9, x11, x18
+ str x9, [x0, #80]
+ adcs x9, x15, x1
+ str x9, [x0, #88]
+ adcs x9, x14, x12
+ str x9, [x0, #96]
+ adcs x9, x2, x13
+ str x9, [x0, #104]
+ adcs x9, x4, x10
+ adcs x8, x5, x8
+ stp x9, x8, [x0, #112]
+ ret
+.Lfunc_end125:
+ .size mcl_fpDbl_sub8L, .Lfunc_end125-mcl_fpDbl_sub8L
+
+ .align 2
+ .type .LmulPv576x64,@function
+.LmulPv576x64: // @mulPv576x64
+// BB#0:
+ ldr x9, [x0]
+ mul x10, x9, x1
+ str x10, [x8]
+ ldr x10, [x0, #8]
+ umulh x9, x9, x1
+ mul x11, x10, x1
+ adds x9, x9, x11
+ str x9, [x8, #8]
+ ldr x9, [x0, #16]
+ umulh x10, x10, x1
+ mul x11, x9, x1
+ adcs x10, x10, x11
+ str x10, [x8, #16]
+ ldr x10, [x0, #24]
+ umulh x9, x9, x1
+ mul x11, x10, x1
+ adcs x9, x9, x11
+ str x9, [x8, #24]
+ ldr x9, [x0, #32]
+ umulh x10, x10, x1
+ mul x11, x9, x1
+ adcs x10, x10, x11
+ str x10, [x8, #32]
+ ldr x10, [x0, #40]
+ umulh x9, x9, x1
+ mul x11, x10, x1
+ adcs x9, x9, x11
+ str x9, [x8, #40]
+ ldr x9, [x0, #48]
+ umulh x10, x10, x1
+ mul x11, x9, x1
+ adcs x10, x10, x11
+ str x10, [x8, #48]
+ ldr x10, [x0, #56]
+ umulh x9, x9, x1
+ mul x11, x10, x1
+ adcs x9, x9, x11
+ str x9, [x8, #56]
+ ldr x9, [x0, #64]
+ umulh x10, x10, x1
+ mul x11, x9, x1
+ umulh x9, x9, x1
+ adcs x10, x10, x11
+ adcs x9, x9, xzr
+ stp x10, x9, [x8, #64]
+ ret
+.Lfunc_end126:
+ .size .LmulPv576x64, .Lfunc_end126-.LmulPv576x64
+
+ .globl mcl_fp_mulUnitPre9L
+ .align 2
+ .type mcl_fp_mulUnitPre9L,@function
+mcl_fp_mulUnitPre9L: // @mcl_fp_mulUnitPre9L
+// BB#0:
+ stp x20, x19, [sp, #-32]!
+ stp x29, x30, [sp, #16]
+ add x29, sp, #16 // =16
+ sub sp, sp, #80 // =80
+ mov x19, x0
+ mov x8, sp
+ mov x0, x1
+ mov x1, x2
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #64]
+ ldp x11, x10, [sp, #48]
+ ldp x13, x12, [sp, #32]
+ ldp x14, x15, [sp]
+ ldp x16, x17, [sp, #16]
+ stp x14, x15, [x19]
+ stp x16, x17, [x19, #16]
+ stp x13, x12, [x19, #32]
+ stp x11, x10, [x19, #48]
+ stp x9, x8, [x19, #64]
+ sub sp, x29, #16 // =16
+ ldp x29, x30, [sp, #16]
+ ldp x20, x19, [sp], #32
+ ret
+.Lfunc_end127:
+ .size mcl_fp_mulUnitPre9L, .Lfunc_end127-mcl_fp_mulUnitPre9L
+
+ .globl mcl_fpDbl_mulPre9L
+ .align 2
+ .type mcl_fpDbl_mulPre9L,@function
+mcl_fpDbl_mulPre9L: // @mcl_fpDbl_mulPre9L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #752 // =752
+ mov x21, x2
+ ldr x9, [x21]
+ mov x20, x1
+ mov x19, x0
+ sub x8, x29, #160 // =160
+ mov x0, x20
+ mov x1, x9
+ bl .LmulPv576x64
+ ldur x8, [x29, #-88]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldur x8, [x29, #-96]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldp x25, x24, [x29, #-112]
+ ldp x27, x26, [x29, #-128]
+ ldp x22, x28, [x29, #-144]
+ ldp x8, x23, [x29, #-160]
+ ldr x1, [x21, #8]
+ str x8, [x19]
+ sub x8, x29, #240 // =240
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [x29, #-176]
+ ldp x11, x10, [x29, #-192]
+ ldp x13, x12, [x29, #-208]
+ ldp x14, x16, [x29, #-240]
+ ldp x17, x15, [x29, #-224]
+ adds x14, x14, x23
+ str x14, [x19, #8]
+ adcs x22, x16, x22
+ adcs x23, x17, x28
+ adcs x27, x15, x27
+ adcs x26, x13, x26
+ adcs x25, x12, x25
+ adcs x24, x11, x24
+ ldr x1, [x21, #16]
+ ldr x11, [sp, #16] // 8-byte Folded Reload
+ adcs x28, x10, x11
+ ldr x10, [sp, #24] // 8-byte Folded Reload
+ adcs x9, x9, x10
+ adcs x8, x8, xzr
+ stp x8, x9, [sp, #16]
+ add x8, sp, #512 // =512
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #584]
+ ldr x9, [sp, #576]
+ ldr x10, [sp, #568]
+ ldr x11, [sp, #560]
+ ldr x12, [sp, #552]
+ ldr x13, [sp, #544]
+ ldr x14, [sp, #512]
+ ldr x15, [sp, #536]
+ ldr x16, [sp, #520]
+ ldr x17, [sp, #528]
+ adds x14, x22, x14
+ str x14, [x19, #16]
+ adcs x22, x23, x16
+ adcs x23, x27, x17
+ adcs x26, x26, x15
+ adcs x25, x25, x13
+ adcs x24, x24, x12
+ adcs x27, x28, x11
+ ldr x1, [x21, #24]
+ ldr x11, [sp, #24] // 8-byte Folded Reload
+ adcs x28, x11, x10
+ ldr x10, [sp, #16] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ adcs x8, x8, xzr
+ stp x8, x9, [sp, #16]
+ add x8, sp, #432 // =432
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #496]
+ ldp x11, x10, [sp, #480]
+ ldp x13, x12, [sp, #464]
+ ldp x14, x16, [sp, #432]
+ ldp x17, x15, [sp, #448]
+ adds x14, x22, x14
+ str x14, [x19, #24]
+ adcs x22, x23, x16
+ adcs x23, x26, x17
+ adcs x25, x25, x15
+ adcs x24, x24, x13
+ adcs x26, x27, x12
+ adcs x27, x28, x11
+ ldr x1, [x21, #32]
+ ldr x11, [sp, #24] // 8-byte Folded Reload
+ adcs x28, x11, x10
+ ldr x10, [sp, #16] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ adcs x8, x8, xzr
+ stp x8, x9, [sp, #16]
+ add x8, sp, #352 // =352
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #416]
+ ldp x11, x10, [sp, #400]
+ ldp x13, x12, [sp, #384]
+ ldp x14, x16, [sp, #352]
+ ldp x17, x15, [sp, #368]
+ adds x14, x22, x14
+ str x14, [x19, #32]
+ adcs x22, x23, x16
+ adcs x23, x25, x17
+ adcs x24, x24, x15
+ adcs x25, x26, x13
+ adcs x26, x27, x12
+ adcs x27, x28, x11
+ ldr x1, [x21, #40]
+ ldr x11, [sp, #24] // 8-byte Folded Reload
+ adcs x28, x11, x10
+ ldr x10, [sp, #16] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ adcs x8, x8, xzr
+ stp x8, x9, [sp, #16]
+ add x8, sp, #272 // =272
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #336]
+ ldp x11, x10, [sp, #320]
+ ldp x13, x12, [sp, #304]
+ ldp x14, x16, [sp, #272]
+ ldp x17, x15, [sp, #288]
+ adds x14, x22, x14
+ str x14, [x19, #40]
+ adcs x22, x23, x16
+ adcs x23, x24, x17
+ adcs x24, x25, x15
+ adcs x25, x26, x13
+ adcs x26, x27, x12
+ adcs x27, x28, x11
+ ldr x1, [x21, #48]
+ ldr x11, [sp, #24] // 8-byte Folded Reload
+ adcs x28, x11, x10
+ ldr x10, [sp, #16] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ adcs x8, x8, xzr
+ stp x8, x9, [sp, #16]
+ add x8, sp, #192 // =192
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #256]
+ ldp x11, x10, [sp, #240]
+ ldp x13, x12, [sp, #224]
+ ldp x14, x16, [sp, #192]
+ ldp x17, x15, [sp, #208]
+ adds x14, x22, x14
+ str x14, [x19, #48]
+ adcs x22, x23, x16
+ adcs x23, x24, x17
+ adcs x24, x25, x15
+ adcs x25, x26, x13
+ adcs x26, x27, x12
+ adcs x27, x28, x11
+ ldr x1, [x21, #56]
+ ldr x11, [sp, #24] // 8-byte Folded Reload
+ adcs x28, x11, x10
+ ldr x10, [sp, #16] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ adcs x8, x8, xzr
+ stp x8, x9, [sp, #16]
+ add x8, sp, #112 // =112
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #176]
+ ldp x11, x10, [sp, #160]
+ ldp x13, x12, [sp, #144]
+ ldp x14, x16, [sp, #112]
+ ldp x17, x15, [sp, #128]
+ adds x14, x22, x14
+ str x14, [x19, #56]
+ adcs x22, x23, x16
+ adcs x23, x24, x17
+ adcs x24, x25, x15
+ adcs x25, x26, x13
+ adcs x26, x27, x12
+ adcs x27, x28, x11
+ ldr x1, [x21, #64]
+ ldr x11, [sp, #24] // 8-byte Folded Reload
+ adcs x21, x11, x10
+ ldr x10, [sp, #16] // 8-byte Folded Reload
+ adcs x28, x10, x9
+ adcs x8, x8, xzr
+ str x8, [sp, #24] // 8-byte Folded Spill
+ add x8, sp, #32 // =32
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #96]
+ ldp x11, x10, [sp, #80]
+ ldp x13, x12, [sp, #64]
+ ldp x14, x16, [sp, #32]
+ ldp x17, x15, [sp, #48]
+ adds x14, x22, x14
+ str x14, [x19, #64]
+ adcs x14, x23, x16
+ str x14, [x19, #72]
+ adcs x14, x24, x17
+ str x14, [x19, #80]
+ adcs x14, x25, x15
+ adcs x13, x26, x13
+ stp x14, x13, [x19, #88]
+ adcs x12, x27, x12
+ adcs x11, x21, x11
+ stp x12, x11, [x19, #104]
+ adcs x10, x28, x10
+ str x10, [x19, #120]
+ ldr x10, [sp, #24] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ adcs x8, x8, xzr
+ stp x9, x8, [x19, #128]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end128:
+ .size mcl_fpDbl_mulPre9L, .Lfunc_end128-mcl_fpDbl_mulPre9L
+
+ .globl mcl_fpDbl_sqrPre9L
+ .align 2
+ .type mcl_fpDbl_sqrPre9L,@function
+mcl_fpDbl_sqrPre9L: // @mcl_fpDbl_sqrPre9L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #736 // =736
+ mov x20, x1
+ ldr x1, [x20]
+ mov x19, x0
+ sub x8, x29, #160 // =160
+ mov x0, x20
+ bl .LmulPv576x64
+ ldur x8, [x29, #-88]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldp x23, x22, [x29, #-104]
+ ldp x25, x24, [x29, #-120]
+ ldp x27, x26, [x29, #-136]
+ ldp x21, x28, [x29, #-152]
+ ldur x8, [x29, #-160]
+ ldr x1, [x20, #8]
+ str x8, [x19]
+ sub x8, x29, #240 // =240
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [x29, #-176]
+ ldp x11, x10, [x29, #-192]
+ ldp x13, x12, [x29, #-208]
+ ldp x14, x16, [x29, #-240]
+ ldp x17, x15, [x29, #-224]
+ adds x14, x14, x21
+ str x14, [x19, #8]
+ adcs x21, x16, x28
+ adcs x27, x17, x27
+ adcs x26, x15, x26
+ adcs x25, x13, x25
+ adcs x24, x12, x24
+ adcs x23, x11, x23
+ ldr x1, [x20, #16]
+ adcs x22, x10, x22
+ ldr x10, [sp, #8] // 8-byte Folded Reload
+ adcs x28, x9, x10
+ adcs x8, x8, xzr
+ str x8, [sp, #8] // 8-byte Folded Spill
+ add x8, sp, #496 // =496
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #568]
+ ldr x9, [sp, #560]
+ ldr x10, [sp, #552]
+ ldr x11, [sp, #544]
+ ldr x12, [sp, #536]
+ ldr x13, [sp, #528]
+ ldp x14, x16, [sp, #496]
+ ldr x15, [sp, #520]
+ ldr x17, [sp, #512]
+ adds x14, x21, x14
+ str x14, [x19, #16]
+ adcs x21, x27, x16
+ adcs x26, x26, x17
+ adcs x25, x25, x15
+ adcs x24, x24, x13
+ adcs x23, x23, x12
+ adcs x22, x22, x11
+ ldr x1, [x20, #24]
+ adcs x27, x28, x10
+ ldr x10, [sp, #8] // 8-byte Folded Reload
+ adcs x28, x10, x9
+ adcs x8, x8, xzr
+ str x8, [sp, #8] // 8-byte Folded Spill
+ add x8, sp, #416 // =416
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #480]
+ ldp x11, x10, [sp, #464]
+ ldp x13, x12, [sp, #448]
+ ldp x14, x16, [sp, #416]
+ ldp x17, x15, [sp, #432]
+ adds x14, x21, x14
+ str x14, [x19, #24]
+ adcs x21, x26, x16
+ adcs x25, x25, x17
+ adcs x24, x24, x15
+ adcs x23, x23, x13
+ adcs x22, x22, x12
+ adcs x26, x27, x11
+ ldr x1, [x20, #32]
+ adcs x27, x28, x10
+ ldr x10, [sp, #8] // 8-byte Folded Reload
+ adcs x28, x10, x9
+ adcs x8, x8, xzr
+ str x8, [sp, #8] // 8-byte Folded Spill
+ add x8, sp, #336 // =336
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #400]
+ ldp x11, x10, [sp, #384]
+ ldp x13, x12, [sp, #368]
+ ldp x14, x16, [sp, #336]
+ ldp x17, x15, [sp, #352]
+ adds x14, x21, x14
+ str x14, [x19, #32]
+ adcs x21, x25, x16
+ adcs x24, x24, x17
+ adcs x23, x23, x15
+ adcs x22, x22, x13
+ adcs x25, x26, x12
+ adcs x26, x27, x11
+ ldr x1, [x20, #40]
+ adcs x27, x28, x10
+ ldr x10, [sp, #8] // 8-byte Folded Reload
+ adcs x28, x10, x9
+ adcs x8, x8, xzr
+ str x8, [sp, #8] // 8-byte Folded Spill
+ add x8, sp, #256 // =256
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #320]
+ ldp x11, x10, [sp, #304]
+ ldp x13, x12, [sp, #288]
+ ldp x14, x16, [sp, #256]
+ ldp x17, x15, [sp, #272]
+ adds x14, x21, x14
+ str x14, [x19, #40]
+ adcs x21, x24, x16
+ adcs x23, x23, x17
+ adcs x22, x22, x15
+ adcs x24, x25, x13
+ adcs x25, x26, x12
+ adcs x26, x27, x11
+ ldr x1, [x20, #48]
+ adcs x27, x28, x10
+ ldr x10, [sp, #8] // 8-byte Folded Reload
+ adcs x28, x10, x9
+ adcs x8, x8, xzr
+ str x8, [sp, #8] // 8-byte Folded Spill
+ add x8, sp, #176 // =176
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #240]
+ ldp x11, x10, [sp, #224]
+ ldp x13, x12, [sp, #208]
+ ldp x14, x16, [sp, #176]
+ ldp x17, x15, [sp, #192]
+ adds x14, x21, x14
+ str x14, [x19, #48]
+ adcs x21, x23, x16
+ adcs x22, x22, x17
+ adcs x23, x24, x15
+ adcs x24, x25, x13
+ adcs x25, x26, x12
+ adcs x26, x27, x11
+ ldr x1, [x20, #56]
+ adcs x27, x28, x10
+ ldr x10, [sp, #8] // 8-byte Folded Reload
+ adcs x28, x10, x9
+ adcs x8, x8, xzr
+ str x8, [sp, #8] // 8-byte Folded Spill
+ add x8, sp, #96 // =96
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #160]
+ ldp x11, x10, [sp, #144]
+ ldp x13, x12, [sp, #128]
+ ldp x14, x16, [sp, #96]
+ ldp x17, x15, [sp, #112]
+ adds x14, x21, x14
+ str x14, [x19, #56]
+ adcs x21, x22, x16
+ adcs x22, x23, x17
+ adcs x23, x24, x15
+ adcs x24, x25, x13
+ adcs x25, x26, x12
+ adcs x26, x27, x11
+ ldr x1, [x20, #64]
+ adcs x27, x28, x10
+ ldr x10, [sp, #8] // 8-byte Folded Reload
+ adcs x28, x10, x9
+ adcs x8, x8, xzr
+ str x8, [sp, #8] // 8-byte Folded Spill
+ add x8, sp, #16 // =16
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #80]
+ ldp x11, x10, [sp, #64]
+ ldp x13, x12, [sp, #48]
+ ldp x14, x16, [sp, #16]
+ ldp x17, x15, [sp, #32]
+ adds x14, x21, x14
+ str x14, [x19, #64]
+ adcs x14, x22, x16
+ str x14, [x19, #72]
+ adcs x14, x23, x17
+ str x14, [x19, #80]
+ adcs x14, x24, x15
+ adcs x13, x25, x13
+ stp x14, x13, [x19, #88]
+ adcs x12, x26, x12
+ adcs x11, x27, x11
+ stp x12, x11, [x19, #104]
+ adcs x10, x28, x10
+ str x10, [x19, #120]
+ ldr x10, [sp, #8] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ adcs x8, x8, xzr
+ stp x9, x8, [x19, #128]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end129:
+ .size mcl_fpDbl_sqrPre9L, .Lfunc_end129-mcl_fpDbl_sqrPre9L
+
+ .globl mcl_fp_mont9L
+ .align 2
+ .type mcl_fp_mont9L,@function
+mcl_fp_mont9L: // @mcl_fp_mont9L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #1600 // =1600
+ mov x20, x3
+ mov x28, x2
+ str x28, [sp, #136] // 8-byte Folded Spill
+ ldur x19, [x20, #-8]
+ str x19, [sp, #144] // 8-byte Folded Spill
+ ldr x9, [x28]
+ mov x23, x1
+ str x23, [sp, #152] // 8-byte Folded Spill
+ str x0, [sp, #128] // 8-byte Folded Spill
+ sub x8, x29, #160 // =160
+ mov x0, x23
+ mov x1, x9
+ bl .LmulPv576x64
+ ldur x24, [x29, #-160]
+ ldur x8, [x29, #-88]
+ str x8, [sp, #120] // 8-byte Folded Spill
+ ldur x8, [x29, #-96]
+ str x8, [sp, #112] // 8-byte Folded Spill
+ ldur x8, [x29, #-104]
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldur x8, [x29, #-112]
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldur x8, [x29, #-120]
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldur x8, [x29, #-128]
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldur x8, [x29, #-136]
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldur x8, [x29, #-144]
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldur x8, [x29, #-152]
+ str x8, [sp, #48] // 8-byte Folded Spill
+ mul x1, x24, x19
+ sub x8, x29, #240 // =240
+ mov x0, x20
+ bl .LmulPv576x64
+ ldur x8, [x29, #-168]
+ str x8, [sp, #56] // 8-byte Folded Spill
+ ldur x8, [x29, #-176]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldur x8, [x29, #-184]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldur x8, [x29, #-192]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldp x21, x19, [x29, #-208]
+ ldp x26, x22, [x29, #-224]
+ ldp x27, x25, [x29, #-240]
+ ldr x1, [x28, #8]
+ add x8, sp, #1360 // =1360
+ mov x0, x23
+ bl .LmulPv576x64
+ cmn x27, x24
+ ldr x8, [sp, #1432]
+ ldr x9, [sp, #1424]
+ ldr x10, [sp, #48] // 8-byte Folded Reload
+ adcs x10, x25, x10
+ ldr x11, [sp, #1416]
+ ldp x12, x14, [sp, #64]
+ adcs x12, x26, x12
+ ldr x13, [sp, #1408]
+ adcs x14, x22, x14
+ ldr x15, [sp, #1400]
+ ldp x16, x18, [sp, #80]
+ adcs x16, x21, x16
+ ldr x17, [sp, #1392]
+ adcs x18, x19, x18
+ ldr x0, [sp, #1384]
+ ldp x1, x3, [sp, #96]
+ ldp x2, x4, [sp, #24]
+ adcs x1, x2, x1
+ ldr x2, [sp, #1376]
+ adcs x3, x4, x3
+ ldr x4, [sp, #1360]
+ ldp x5, x7, [sp, #112]
+ ldr x6, [sp, #40] // 8-byte Folded Reload
+ adcs x5, x6, x5
+ ldr x6, [sp, #1368]
+ ldr x19, [sp, #56] // 8-byte Folded Reload
+ adcs x7, x19, x7
+ adcs x19, xzr, xzr
+ adds x21, x10, x4
+ adcs x10, x12, x6
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x2
+ str x10, [sp, #104] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #96] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ adcs x8, x19, x8
+ stp x8, x9, [sp, #112]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #56]
+ ldr x24, [sp, #144] // 8-byte Folded Reload
+ mul x1, x21, x24
+ add x8, sp, #1280 // =1280
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #1352]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #1344]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #1336]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #1328]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x26, [sp, #1320]
+ ldr x27, [sp, #1312]
+ ldr x28, [sp, #1304]
+ ldr x22, [sp, #1296]
+ ldr x19, [sp, #1288]
+ ldr x23, [sp, #1280]
+ ldr x25, [sp, #136] // 8-byte Folded Reload
+ ldr x1, [x25, #16]
+ add x8, sp, #1200 // =1200
+ ldr x0, [sp, #152] // 8-byte Folded Reload
+ bl .LmulPv576x64
+ cmn x21, x23
+ ldr x8, [sp, #1272]
+ ldr x9, [sp, #1264]
+ ldr x10, [sp, #48] // 8-byte Folded Reload
+ adcs x10, x10, x19
+ ldr x11, [sp, #1256]
+ ldp x14, x12, [sp, #96]
+ adcs x12, x12, x22
+ ldr x13, [sp, #1248]
+ adcs x14, x14, x28
+ ldr x15, [sp, #1240]
+ ldp x18, x16, [sp, #80]
+ adcs x16, x16, x27
+ ldr x17, [sp, #1232]
+ adcs x18, x18, x26
+ ldr x0, [sp, #1224]
+ ldp x3, x1, [sp, #64]
+ ldp x2, x4, [sp, #16]
+ adcs x1, x1, x2
+ ldr x2, [sp, #1216]
+ adcs x3, x3, x4
+ ldr x4, [sp, #1200]
+ ldp x7, x5, [sp, #112]
+ ldp x6, x19, [sp, #32]
+ adcs x5, x5, x6
+ ldr x6, [sp, #1208]
+ adcs x7, x7, x19
+ ldr x19, [sp, #56] // 8-byte Folded Reload
+ adcs x19, x19, xzr
+ adds x21, x10, x4
+ adcs x10, x12, x6
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x2
+ str x10, [sp, #104] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #96] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ adcs x8, x19, x8
+ stp x8, x9, [sp, #112]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #56]
+ mul x1, x21, x24
+ add x8, sp, #1120 // =1120
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #1192]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #1184]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #1176]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #1168]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x26, [sp, #1160]
+ ldr x27, [sp, #1152]
+ ldr x28, [sp, #1144]
+ ldr x22, [sp, #1136]
+ ldr x19, [sp, #1128]
+ ldr x23, [sp, #1120]
+ ldr x1, [x25, #24]
+ add x8, sp, #1040 // =1040
+ ldr x24, [sp, #152] // 8-byte Folded Reload
+ mov x0, x24
+ bl .LmulPv576x64
+ cmn x21, x23
+ ldr x8, [sp, #1112]
+ ldr x9, [sp, #1104]
+ ldr x10, [sp, #48] // 8-byte Folded Reload
+ adcs x10, x10, x19
+ ldr x11, [sp, #1096]
+ ldp x14, x12, [sp, #96]
+ adcs x12, x12, x22
+ ldr x13, [sp, #1088]
+ adcs x14, x14, x28
+ ldr x15, [sp, #1080]
+ ldp x18, x16, [sp, #80]
+ adcs x16, x16, x27
+ ldr x17, [sp, #1072]
+ adcs x18, x18, x26
+ ldr x0, [sp, #1064]
+ ldp x3, x1, [sp, #64]
+ ldp x2, x4, [sp, #16]
+ adcs x1, x1, x2
+ ldr x2, [sp, #1056]
+ adcs x3, x3, x4
+ ldr x4, [sp, #1040]
+ ldp x7, x5, [sp, #112]
+ ldp x6, x19, [sp, #32]
+ adcs x5, x5, x6
+ ldr x6, [sp, #1048]
+ adcs x7, x7, x19
+ ldr x19, [sp, #56] // 8-byte Folded Reload
+ adcs x19, x19, xzr
+ adds x21, x10, x4
+ adcs x10, x12, x6
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x2
+ str x10, [sp, #104] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #96] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ adcs x8, x19, x8
+ stp x8, x9, [sp, #112]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #56]
+ ldr x8, [sp, #144] // 8-byte Folded Reload
+ mul x1, x21, x8
+ add x8, sp, #960 // =960
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #1032]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #1024]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #1016]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #1008]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x26, [sp, #1000]
+ ldr x27, [sp, #992]
+ ldr x28, [sp, #984]
+ ldr x22, [sp, #976]
+ ldr x19, [sp, #968]
+ ldr x23, [sp, #960]
+ ldr x1, [x25, #32]
+ add x8, sp, #880 // =880
+ mov x0, x24
+ bl .LmulPv576x64
+ cmn x21, x23
+ ldr x8, [sp, #952]
+ ldr x9, [sp, #944]
+ ldr x10, [sp, #48] // 8-byte Folded Reload
+ adcs x10, x10, x19
+ ldr x11, [sp, #936]
+ ldp x14, x12, [sp, #96]
+ adcs x12, x12, x22
+ ldr x13, [sp, #928]
+ adcs x14, x14, x28
+ ldr x15, [sp, #920]
+ ldp x18, x16, [sp, #80]
+ adcs x16, x16, x27
+ ldr x17, [sp, #912]
+ adcs x18, x18, x26
+ ldr x0, [sp, #904]
+ ldp x3, x1, [sp, #64]
+ ldp x2, x4, [sp, #16]
+ adcs x1, x1, x2
+ ldr x2, [sp, #896]
+ adcs x3, x3, x4
+ ldr x4, [sp, #880]
+ ldp x7, x5, [sp, #112]
+ ldp x6, x19, [sp, #32]
+ adcs x5, x5, x6
+ ldr x6, [sp, #888]
+ adcs x7, x7, x19
+ ldr x19, [sp, #56] // 8-byte Folded Reload
+ adcs x19, x19, xzr
+ adds x21, x10, x4
+ adcs x10, x12, x6
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x2
+ str x10, [sp, #104] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #96] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ adcs x8, x19, x8
+ stp x8, x9, [sp, #112]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #56]
+ ldr x25, [sp, #144] // 8-byte Folded Reload
+ mul x1, x21, x25
+ add x8, sp, #800 // =800
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #872]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #864]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #856]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #848]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x26, [sp, #840]
+ ldr x27, [sp, #832]
+ ldr x28, [sp, #824]
+ ldr x22, [sp, #816]
+ ldr x19, [sp, #808]
+ ldr x23, [sp, #800]
+ ldr x24, [sp, #136] // 8-byte Folded Reload
+ ldr x1, [x24, #40]
+ add x8, sp, #720 // =720
+ ldr x0, [sp, #152] // 8-byte Folded Reload
+ bl .LmulPv576x64
+ cmn x21, x23
+ ldr x8, [sp, #792]
+ ldr x9, [sp, #784]
+ ldr x10, [sp, #48] // 8-byte Folded Reload
+ adcs x10, x10, x19
+ ldr x11, [sp, #776]
+ ldp x14, x12, [sp, #96]
+ adcs x12, x12, x22
+ ldr x13, [sp, #768]
+ adcs x14, x14, x28
+ ldr x15, [sp, #760]
+ ldp x18, x16, [sp, #80]
+ adcs x16, x16, x27
+ ldr x17, [sp, #752]
+ adcs x18, x18, x26
+ ldr x0, [sp, #744]
+ ldp x3, x1, [sp, #64]
+ ldp x2, x4, [sp, #16]
+ adcs x1, x1, x2
+ ldr x2, [sp, #736]
+ adcs x3, x3, x4
+ ldr x4, [sp, #720]
+ ldp x7, x5, [sp, #112]
+ ldp x6, x19, [sp, #32]
+ adcs x5, x5, x6
+ ldr x6, [sp, #728]
+ adcs x7, x7, x19
+ ldr x19, [sp, #56] // 8-byte Folded Reload
+ adcs x19, x19, xzr
+ adds x21, x10, x4
+ adcs x10, x12, x6
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x2
+ str x10, [sp, #104] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #96] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ adcs x8, x19, x8
+ stp x8, x9, [sp, #112]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #56]
+ mul x1, x21, x25
+ add x8, sp, #640 // =640
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #712]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #704]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #696]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #688]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x26, [sp, #680]
+ ldr x27, [sp, #672]
+ ldr x28, [sp, #664]
+ ldr x22, [sp, #656]
+ ldr x19, [sp, #648]
+ ldr x23, [sp, #640]
+ ldr x1, [x24, #48]
+ add x8, sp, #560 // =560
+ ldr x25, [sp, #152] // 8-byte Folded Reload
+ mov x0, x25
+ bl .LmulPv576x64
+ cmn x21, x23
+ ldr x8, [sp, #632]
+ ldr x9, [sp, #624]
+ ldr x10, [sp, #48] // 8-byte Folded Reload
+ adcs x10, x10, x19
+ ldr x11, [sp, #616]
+ ldp x14, x12, [sp, #96]
+ adcs x12, x12, x22
+ ldr x13, [sp, #608]
+ adcs x14, x14, x28
+ ldr x15, [sp, #600]
+ ldp x18, x16, [sp, #80]
+ adcs x16, x16, x27
+ ldr x17, [sp, #592]
+ adcs x18, x18, x26
+ ldr x0, [sp, #584]
+ ldp x3, x1, [sp, #64]
+ ldp x2, x4, [sp, #16]
+ adcs x1, x1, x2
+ ldr x2, [sp, #576]
+ adcs x3, x3, x4
+ ldr x4, [sp, #560]
+ ldp x7, x5, [sp, #112]
+ ldp x6, x19, [sp, #32]
+ adcs x5, x5, x6
+ ldr x6, [sp, #568]
+ adcs x7, x7, x19
+ ldr x19, [sp, #56] // 8-byte Folded Reload
+ adcs x19, x19, xzr
+ adds x21, x10, x4
+ adcs x10, x12, x6
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x2
+ str x10, [sp, #104] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #96] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ adcs x8, x19, x8
+ stp x8, x9, [sp, #112]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #56]
+ ldr x24, [sp, #144] // 8-byte Folded Reload
+ mul x1, x21, x24
+ add x8, sp, #480 // =480
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #552]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [sp, #544]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #536]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #528]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x26, [sp, #520]
+ ldr x27, [sp, #512]
+ ldp x22, x28, [sp, #496]
+ ldp x23, x19, [sp, #480]
+ ldr x8, [sp, #136] // 8-byte Folded Reload
+ ldr x1, [x8, #56]
+ add x8, sp, #400 // =400
+ mov x0, x25
+ bl .LmulPv576x64
+ cmn x21, x23
+ ldp x9, x8, [sp, #464]
+ ldr x10, [sp, #48] // 8-byte Folded Reload
+ adcs x10, x10, x19
+ ldp x13, x11, [sp, #448]
+ ldp x14, x12, [sp, #96]
+ adcs x12, x12, x22
+ adcs x14, x14, x28
+ ldp x17, x15, [sp, #432]
+ ldp x18, x16, [sp, #80]
+ adcs x16, x16, x27
+ adcs x18, x18, x26
+ ldp x3, x1, [sp, #64]
+ ldp x2, x4, [sp, #16]
+ adcs x1, x1, x2
+ ldp x2, x0, [sp, #416]
+ adcs x3, x3, x4
+ ldp x7, x5, [sp, #112]
+ ldp x6, x19, [sp, #32]
+ adcs x5, x5, x6
+ ldp x4, x6, [sp, #400]
+ adcs x7, x7, x19
+ ldr x19, [sp, #56] // 8-byte Folded Reload
+ adcs x19, x19, xzr
+ adds x21, x10, x4
+ adcs x10, x12, x6
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x14, x2
+ str x10, [sp, #104] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #96] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #88] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ adcs x8, x19, x8
+ stp x8, x9, [sp, #112]
+ adcs x8, xzr, xzr
+ stp x8, x10, [sp, #56]
+ mul x1, x21, x24
+ add x8, sp, #320 // =320
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #392]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldp x24, x8, [sp, #376]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldp x26, x25, [sp, #360]
+ ldp x28, x27, [sp, #344]
+ ldp x19, x22, [sp, #328]
+ ldr x23, [sp, #320]
+ ldr x8, [sp, #136] // 8-byte Folded Reload
+ ldr x1, [x8, #64]
+ add x8, sp, #240 // =240
+ ldr x0, [sp, #152] // 8-byte Folded Reload
+ bl .LmulPv576x64
+ cmn x21, x23
+ ldp x9, x8, [sp, #304]
+ ldr x10, [sp, #48] // 8-byte Folded Reload
+ adcs x10, x10, x19
+ ldp x13, x11, [sp, #288]
+ ldp x14, x12, [sp, #96]
+ adcs x12, x12, x22
+ adcs x14, x14, x28
+ ldp x17, x15, [sp, #272]
+ ldp x18, x16, [sp, #80]
+ adcs x16, x16, x27
+ adcs x18, x18, x26
+ ldp x2, x0, [sp, #256]
+ ldp x3, x1, [sp, #64]
+ adcs x1, x1, x25
+ adcs x3, x3, x24
+ ldp x7, x5, [sp, #112]
+ ldp x6, x19, [sp, #32]
+ adcs x5, x5, x6
+ ldp x4, x6, [sp, #240]
+ adcs x7, x7, x19
+ ldr x19, [sp, #56] // 8-byte Folded Reload
+ adcs x19, x19, xzr
+ adds x21, x10, x4
+ adcs x22, x12, x6
+ adcs x23, x14, x2
+ adcs x24, x16, x0
+ adcs x25, x18, x17
+ adcs x26, x1, x15
+ adcs x27, x3, x13
+ adcs x10, x5, x11
+ str x10, [sp, #152] // 8-byte Folded Spill
+ adcs x9, x7, x9
+ str x9, [sp, #136] // 8-byte Folded Spill
+ adcs x19, x19, x8
+ adcs x28, xzr, xzr
+ ldr x8, [sp, #144] // 8-byte Folded Reload
+ mul x1, x21, x8
+ add x8, sp, #160 // =160
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x16, x8, [sp, #224]
+ ldp x9, x10, [sp, #160]
+ ldp x11, x12, [sp, #176]
+ cmn x21, x9
+ ldp x13, x9, [sp, #192]
+ adcs x10, x22, x10
+ ldp x14, x15, [sp, #208]
+ adcs x11, x23, x11
+ adcs x12, x24, x12
+ adcs x13, x25, x13
+ adcs x9, x26, x9
+ adcs x14, x27, x14
+ ldp x0, x17, [x20, #56]
+ ldp x2, x18, [x20, #40]
+ ldp x4, x1, [x20, #24]
+ ldp x6, x3, [x20, #8]
+ ldr x5, [x20]
+ ldr x7, [sp, #152] // 8-byte Folded Reload
+ adcs x15, x7, x15
+ ldr x7, [sp, #136] // 8-byte Folded Reload
+ adcs x16, x7, x16
+ adcs x8, x19, x8
+ adcs x7, x28, xzr
+ subs x5, x10, x5
+ sbcs x6, x11, x6
+ sbcs x3, x12, x3
+ sbcs x4, x13, x4
+ sbcs x1, x9, x1
+ sbcs x2, x14, x2
+ sbcs x18, x15, x18
+ sbcs x0, x16, x0
+ sbcs x17, x8, x17
+ sbcs x7, x7, xzr
+ tst x7, #0x1
+ csel x10, x10, x5, ne
+ csel x11, x11, x6, ne
+ csel x12, x12, x3, ne
+ csel x13, x13, x4, ne
+ csel x9, x9, x1, ne
+ csel x14, x14, x2, ne
+ csel x15, x15, x18, ne
+ csel x16, x16, x0, ne
+ csel x8, x8, x17, ne
+ ldr x17, [sp, #128] // 8-byte Folded Reload
+ stp x10, x11, [x17]
+ stp x12, x13, [x17, #16]
+ stp x9, x14, [x17, #32]
+ stp x15, x16, [x17, #48]
+ str x8, [x17, #64]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end130:
+ .size mcl_fp_mont9L, .Lfunc_end130-mcl_fp_mont9L
+
+ .globl mcl_fp_montNF9L
+ .align 2
+ .type mcl_fp_montNF9L,@function
+mcl_fp_montNF9L: // @mcl_fp_montNF9L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #1584 // =1584
+ mov x20, x3
+ mov x28, x2
+ str x28, [sp, #120] // 8-byte Folded Spill
+ ldur x19, [x20, #-8]
+ str x19, [sp, #128] // 8-byte Folded Spill
+ ldr x9, [x28]
+ mov x23, x1
+ str x23, [sp, #136] // 8-byte Folded Spill
+ str x0, [sp, #112] // 8-byte Folded Spill
+ sub x8, x29, #160 // =160
+ mov x0, x23
+ mov x1, x9
+ bl .LmulPv576x64
+ ldur x24, [x29, #-160]
+ ldur x8, [x29, #-88]
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldur x8, [x29, #-96]
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldur x8, [x29, #-104]
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldur x8, [x29, #-112]
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldur x8, [x29, #-120]
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldur x8, [x29, #-128]
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldur x8, [x29, #-136]
+ str x8, [sp, #56] // 8-byte Folded Spill
+ ldur x8, [x29, #-144]
+ str x8, [sp, #48] // 8-byte Folded Spill
+ ldur x8, [x29, #-152]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ mul x1, x24, x19
+ sub x8, x29, #240 // =240
+ mov x0, x20
+ bl .LmulPv576x64
+ ldur x8, [x29, #-168]
+ str x8, [sp, #40] // 8-byte Folded Spill
+ ldur x8, [x29, #-176]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldur x8, [x29, #-184]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldur x8, [x29, #-192]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldp x21, x19, [x29, #-208]
+ ldp x26, x22, [x29, #-224]
+ ldp x27, x25, [x29, #-240]
+ ldr x1, [x28, #8]
+ add x8, sp, #1344 // =1344
+ mov x0, x23
+ bl .LmulPv576x64
+ cmn x27, x24
+ ldr x8, [sp, #1416]
+ ldr x9, [sp, #1408]
+ ldr x10, [sp, #32] // 8-byte Folded Reload
+ adcs x10, x25, x10
+ ldr x11, [sp, #1400]
+ ldp x12, x14, [sp, #48]
+ adcs x12, x26, x12
+ ldr x13, [sp, #1392]
+ adcs x14, x22, x14
+ ldr x15, [sp, #1384]
+ ldp x16, x18, [sp, #64]
+ adcs x16, x21, x16
+ ldr x17, [sp, #1376]
+ adcs x18, x19, x18
+ ldr x0, [sp, #1368]
+ ldp x1, x3, [sp, #80]
+ ldp x2, x4, [sp, #8]
+ adcs x1, x2, x1
+ ldr x2, [sp, #1352]
+ adcs x3, x4, x3
+ ldr x4, [sp, #1344]
+ ldp x5, x7, [sp, #96]
+ ldr x6, [sp, #24] // 8-byte Folded Reload
+ adcs x5, x6, x5
+ ldr x6, [sp, #1360]
+ ldr x19, [sp, #40] // 8-byte Folded Reload
+ adcs x7, x19, x7
+ adds x19, x10, x4
+ adcs x10, x12, x2
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x6
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x22, [sp, #128] // 8-byte Folded Reload
+ mul x1, x19, x22
+ add x8, sp, #1264 // =1264
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #1336]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #1328]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #1320]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x8, [sp, #1312]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldr x24, [sp, #1304]
+ ldr x25, [sp, #1296]
+ ldr x26, [sp, #1288]
+ ldr x21, [sp, #1280]
+ ldr x27, [sp, #1272]
+ ldr x28, [sp, #1264]
+ ldr x23, [sp, #120] // 8-byte Folded Reload
+ ldr x1, [x23, #16]
+ add x8, sp, #1184 // =1184
+ ldr x0, [sp, #136] // 8-byte Folded Reload
+ bl .LmulPv576x64
+ cmn x19, x28
+ ldr x8, [sp, #1256]
+ ldr x9, [sp, #1248]
+ ldp x10, x1, [sp, #40]
+ adcs x10, x10, x27
+ ldr x11, [sp, #1240]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x21
+ ldr x13, [sp, #1232]
+ adcs x14, x14, x26
+ ldr x15, [sp, #1224]
+ ldp x18, x16, [sp, #56]
+ adcs x16, x16, x25
+ ldr x17, [sp, #1216]
+ adcs x18, x18, x24
+ ldr x0, [sp, #1208]
+ ldp x2, x4, [sp, #8]
+ adcs x1, x1, x2
+ ldr x2, [sp, #1192]
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x4
+ ldr x4, [sp, #1184]
+ ldp x6, x19, [sp, #24]
+ adcs x5, x5, x6
+ ldr x6, [sp, #1200]
+ ldr x7, [sp, #88] // 8-byte Folded Reload
+ adcs x7, x7, x19
+ adds x19, x10, x4
+ adcs x10, x12, x2
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x6
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ mul x1, x19, x22
+ add x8, sp, #1104 // =1104
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #1176]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #1168]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #1160]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x8, [sp, #1152]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldr x24, [sp, #1144]
+ ldr x25, [sp, #1136]
+ ldr x26, [sp, #1128]
+ ldr x21, [sp, #1120]
+ ldr x27, [sp, #1112]
+ ldr x28, [sp, #1104]
+ ldr x1, [x23, #24]
+ add x8, sp, #1024 // =1024
+ ldr x22, [sp, #136] // 8-byte Folded Reload
+ mov x0, x22
+ bl .LmulPv576x64
+ cmn x19, x28
+ ldr x8, [sp, #1096]
+ ldr x9, [sp, #1088]
+ ldp x10, x1, [sp, #40]
+ adcs x10, x10, x27
+ ldr x11, [sp, #1080]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x21
+ ldr x13, [sp, #1072]
+ adcs x14, x14, x26
+ ldr x15, [sp, #1064]
+ ldp x18, x16, [sp, #56]
+ adcs x16, x16, x25
+ ldr x17, [sp, #1056]
+ adcs x18, x18, x24
+ ldr x0, [sp, #1048]
+ ldp x2, x4, [sp, #8]
+ adcs x1, x1, x2
+ ldr x2, [sp, #1032]
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x4
+ ldr x4, [sp, #1024]
+ ldp x6, x19, [sp, #24]
+ adcs x5, x5, x6
+ ldr x6, [sp, #1040]
+ ldr x7, [sp, #88] // 8-byte Folded Reload
+ adcs x7, x7, x19
+ adds x19, x10, x4
+ adcs x10, x12, x2
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x6
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [sp, #128] // 8-byte Folded Reload
+ mul x1, x19, x8
+ add x8, sp, #944 // =944
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #1016]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #1008]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #1000]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x8, [sp, #992]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldr x24, [sp, #984]
+ ldr x25, [sp, #976]
+ ldr x26, [sp, #968]
+ ldr x21, [sp, #960]
+ ldr x27, [sp, #952]
+ ldr x28, [sp, #944]
+ ldr x1, [x23, #32]
+ add x8, sp, #864 // =864
+ mov x0, x22
+ bl .LmulPv576x64
+ cmn x19, x28
+ ldr x8, [sp, #936]
+ ldr x9, [sp, #928]
+ ldp x10, x1, [sp, #40]
+ adcs x10, x10, x27
+ ldr x11, [sp, #920]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x21
+ ldr x13, [sp, #912]
+ adcs x14, x14, x26
+ ldr x15, [sp, #904]
+ ldp x18, x16, [sp, #56]
+ adcs x16, x16, x25
+ ldr x17, [sp, #896]
+ adcs x18, x18, x24
+ ldr x0, [sp, #888]
+ ldp x2, x4, [sp, #8]
+ adcs x1, x1, x2
+ ldr x2, [sp, #872]
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x4
+ ldr x4, [sp, #864]
+ ldp x6, x19, [sp, #24]
+ adcs x5, x5, x6
+ ldr x6, [sp, #880]
+ ldr x7, [sp, #88] // 8-byte Folded Reload
+ adcs x7, x7, x19
+ adds x19, x10, x4
+ adcs x10, x12, x2
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x6
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x23, [sp, #128] // 8-byte Folded Reload
+ mul x1, x19, x23
+ add x8, sp, #784 // =784
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #856]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #848]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #840]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x8, [sp, #832]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldr x24, [sp, #824]
+ ldr x25, [sp, #816]
+ ldr x26, [sp, #808]
+ ldr x21, [sp, #800]
+ ldr x27, [sp, #792]
+ ldr x28, [sp, #784]
+ ldr x22, [sp, #120] // 8-byte Folded Reload
+ ldr x1, [x22, #40]
+ add x8, sp, #704 // =704
+ ldr x0, [sp, #136] // 8-byte Folded Reload
+ bl .LmulPv576x64
+ cmn x19, x28
+ ldr x8, [sp, #776]
+ ldr x9, [sp, #768]
+ ldp x10, x1, [sp, #40]
+ adcs x10, x10, x27
+ ldr x11, [sp, #760]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x21
+ ldr x13, [sp, #752]
+ adcs x14, x14, x26
+ ldr x15, [sp, #744]
+ ldp x18, x16, [sp, #56]
+ adcs x16, x16, x25
+ ldr x17, [sp, #736]
+ adcs x18, x18, x24
+ ldr x0, [sp, #728]
+ ldp x2, x4, [sp, #8]
+ adcs x1, x1, x2
+ ldr x2, [sp, #712]
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x4
+ ldr x4, [sp, #704]
+ ldp x6, x19, [sp, #24]
+ adcs x5, x5, x6
+ ldr x6, [sp, #720]
+ ldr x7, [sp, #88] // 8-byte Folded Reload
+ adcs x7, x7, x19
+ adds x19, x10, x4
+ adcs x10, x12, x2
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x6
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ mul x1, x19, x23
+ add x8, sp, #624 // =624
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #696]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #688]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #680]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x8, [sp, #672]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldr x24, [sp, #664]
+ ldr x25, [sp, #656]
+ ldr x26, [sp, #648]
+ ldr x21, [sp, #640]
+ ldr x27, [sp, #632]
+ ldr x28, [sp, #624]
+ ldr x1, [x22, #48]
+ add x8, sp, #544 // =544
+ ldr x23, [sp, #136] // 8-byte Folded Reload
+ mov x0, x23
+ bl .LmulPv576x64
+ cmn x19, x28
+ ldr x8, [sp, #616]
+ ldr x9, [sp, #608]
+ ldp x10, x1, [sp, #40]
+ adcs x10, x10, x27
+ ldr x11, [sp, #600]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x21
+ ldr x13, [sp, #592]
+ adcs x14, x14, x26
+ ldr x15, [sp, #584]
+ ldp x18, x16, [sp, #56]
+ adcs x16, x16, x25
+ ldr x17, [sp, #576]
+ adcs x18, x18, x24
+ ldr x0, [sp, #568]
+ ldp x2, x4, [sp, #8]
+ adcs x1, x1, x2
+ ldr x2, [sp, #552]
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x4
+ ldr x4, [sp, #544]
+ ldp x6, x19, [sp, #24]
+ adcs x5, x5, x6
+ ldr x6, [sp, #560]
+ ldr x7, [sp, #88] // 8-byte Folded Reload
+ adcs x7, x7, x19
+ adds x19, x10, x4
+ adcs x10, x12, x2
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x6
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x22, [sp, #128] // 8-byte Folded Reload
+ mul x1, x19, x22
+ add x8, sp, #464 // =464
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #536]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldr x8, [sp, #528]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldr x8, [sp, #520]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldr x8, [sp, #512]
+ str x8, [sp, #8] // 8-byte Folded Spill
+ ldp x25, x24, [sp, #496]
+ ldp x21, x26, [sp, #480]
+ ldp x28, x27, [sp, #464]
+ ldr x8, [sp, #120] // 8-byte Folded Reload
+ ldr x1, [x8, #56]
+ add x8, sp, #384 // =384
+ mov x0, x23
+ bl .LmulPv576x64
+ cmn x19, x28
+ ldp x9, x8, [sp, #448]
+ ldp x10, x1, [sp, #40]
+ adcs x10, x10, x27
+ ldp x13, x11, [sp, #432]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x21
+ adcs x14, x14, x26
+ ldp x17, x15, [sp, #416]
+ ldp x18, x16, [sp, #56]
+ adcs x16, x16, x25
+ adcs x18, x18, x24
+ ldp x2, x4, [sp, #8]
+ adcs x1, x1, x2
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x4
+ ldp x4, x2, [sp, #384]
+ ldp x6, x19, [sp, #24]
+ adcs x5, x5, x6
+ ldp x6, x0, [sp, #400]
+ ldr x7, [sp, #88] // 8-byte Folded Reload
+ adcs x7, x7, x19
+ adds x19, x10, x4
+ adcs x10, x12, x2
+ str x10, [sp, #40] // 8-byte Folded Spill
+ adcs x10, x14, x6
+ str x10, [sp, #80] // 8-byte Folded Spill
+ adcs x10, x16, x0
+ str x10, [sp, #72] // 8-byte Folded Spill
+ adcs x10, x18, x17
+ str x10, [sp, #64] // 8-byte Folded Spill
+ adcs x10, x1, x15
+ str x10, [sp, #56] // 8-byte Folded Spill
+ adcs x10, x3, x13
+ str x10, [sp, #48] // 8-byte Folded Spill
+ adcs x10, x5, x11
+ adcs x9, x7, x9
+ stp x9, x10, [sp, #96]
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ mul x1, x19, x22
+ add x8, sp, #304 // =304
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #376]
+ str x8, [sp, #32] // 8-byte Folded Spill
+ ldp x22, x8, [sp, #360]
+ str x8, [sp, #24] // 8-byte Folded Spill
+ ldp x24, x23, [sp, #344]
+ ldp x26, x25, [sp, #328]
+ ldp x27, x21, [sp, #312]
+ ldr x28, [sp, #304]
+ ldr x8, [sp, #120] // 8-byte Folded Reload
+ ldr x1, [x8, #64]
+ add x8, sp, #224 // =224
+ ldr x0, [sp, #136] // 8-byte Folded Reload
+ bl .LmulPv576x64
+ cmn x19, x28
+ ldp x9, x8, [sp, #288]
+ ldp x10, x1, [sp, #40]
+ adcs x10, x10, x27
+ ldp x13, x11, [sp, #272]
+ ldp x14, x12, [sp, #72]
+ adcs x12, x12, x21
+ adcs x14, x14, x26
+ ldp x17, x15, [sp, #256]
+ ldp x18, x16, [sp, #56]
+ adcs x16, x16, x25
+ adcs x18, x18, x24
+ adcs x1, x1, x23
+ ldp x4, x2, [sp, #224]
+ ldp x5, x3, [sp, #96]
+ adcs x3, x3, x22
+ ldp x6, x19, [sp, #24]
+ adcs x5, x5, x6
+ ldp x6, x0, [sp, #240]
+ ldr x7, [sp, #88] // 8-byte Folded Reload
+ adcs x7, x7, x19
+ adds x19, x10, x4
+ adcs x21, x12, x2
+ adcs x22, x14, x6
+ adcs x23, x16, x0
+ adcs x24, x18, x17
+ adcs x25, x1, x15
+ adcs x26, x3, x13
+ adcs x10, x5, x11
+ str x10, [sp, #136] // 8-byte Folded Spill
+ adcs x28, x7, x9
+ adcs x27, x8, xzr
+ ldr x8, [sp, #128] // 8-byte Folded Reload
+ mul x1, x19, x8
+ add x8, sp, #144 // =144
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x16, x8, [sp, #208]
+ ldp x9, x10, [sp, #144]
+ ldp x11, x12, [sp, #160]
+ cmn x19, x9
+ ldp x13, x9, [sp, #176]
+ adcs x10, x21, x10
+ ldp x14, x15, [sp, #192]
+ adcs x11, x22, x11
+ adcs x12, x23, x12
+ adcs x13, x24, x13
+ adcs x9, x25, x9
+ adcs x14, x26, x14
+ ldp x0, x17, [x20, #56]
+ ldp x2, x18, [x20, #40]
+ ldp x4, x1, [x20, #24]
+ ldp x6, x3, [x20, #8]
+ ldr x5, [x20]
+ ldr x7, [sp, #136] // 8-byte Folded Reload
+ adcs x15, x7, x15
+ adcs x16, x28, x16
+ adcs x8, x27, x8
+ subs x5, x10, x5
+ sbcs x6, x11, x6
+ sbcs x3, x12, x3
+ sbcs x4, x13, x4
+ sbcs x1, x9, x1
+ sbcs x2, x14, x2
+ sbcs x18, x15, x18
+ sbcs x0, x16, x0
+ sbcs x17, x8, x17
+ asr x7, x17, #63
+ cmp x7, #0 // =0
+ csel x10, x10, x5, lt
+ csel x11, x11, x6, lt
+ csel x12, x12, x3, lt
+ csel x13, x13, x4, lt
+ csel x9, x9, x1, lt
+ csel x14, x14, x2, lt
+ csel x15, x15, x18, lt
+ csel x16, x16, x0, lt
+ csel x8, x8, x17, lt
+ ldr x17, [sp, #112] // 8-byte Folded Reload
+ stp x10, x11, [x17]
+ stp x12, x13, [x17, #16]
+ stp x9, x14, [x17, #32]
+ stp x15, x16, [x17, #48]
+ str x8, [x17, #64]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end131:
+ .size mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L
+
+ .globl mcl_fp_montRed9L
+ .align 2
+ .type mcl_fp_montRed9L,@function
+mcl_fp_montRed9L: // @mcl_fp_montRed9L
+// BB#0:
+ stp x28, x27, [sp, #-96]!
+ stp x26, x25, [sp, #16]
+ stp x24, x23, [sp, #32]
+ stp x22, x21, [sp, #48]
+ stp x20, x19, [sp, #64]
+ stp x29, x30, [sp, #80]
+ add x29, sp, #80 // =80
+ sub sp, sp, #912 // =912
+ mov x20, x2
+ ldur x9, [x20, #-8]
+ str x9, [sp, #40] // 8-byte Folded Spill
+ ldr x8, [x20, #64]
+ str x8, [sp, #184] // 8-byte Folded Spill
+ ldr x8, [x20, #48]
+ str x8, [sp, #168] // 8-byte Folded Spill
+ ldr x8, [x20, #56]
+ str x8, [sp, #176] // 8-byte Folded Spill
+ ldr x8, [x20, #32]
+ str x8, [sp, #144] // 8-byte Folded Spill
+ ldr x8, [x20, #40]
+ str x8, [sp, #152] // 8-byte Folded Spill
+ ldr x8, [x20, #16]
+ str x8, [sp, #128] // 8-byte Folded Spill
+ ldr x8, [x20, #24]
+ str x8, [sp, #136] // 8-byte Folded Spill
+ ldr x8, [x20]
+ str x8, [sp, #112] // 8-byte Folded Spill
+ ldr x8, [x20, #8]
+ str x8, [sp, #120] // 8-byte Folded Spill
+ ldr x8, [x1, #128]
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldr x8, [x1, #136]
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldr x8, [x1, #112]
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [x1, #120]
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [x1, #96]
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldr x8, [x1, #104]
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldr x8, [x1, #80]
+ str x8, [sp, #48] // 8-byte Folded Spill
+ ldr x8, [x1, #88]
+ str x8, [sp, #56] // 8-byte Folded Spill
+ ldp x23, x8, [x1, #64]
+ str x8, [sp, #16] // 8-byte Folded Spill
+ ldp x25, x19, [x1, #48]
+ ldp x28, x27, [x1, #32]
+ ldp x22, x24, [x1, #16]
+ ldp x21, x26, [x1]
+ str x0, [sp, #160] // 8-byte Folded Spill
+ mul x1, x21, x9
+ sub x8, x29, #160 // =160
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [x29, #-96]
+ ldp x11, x10, [x29, #-112]
+ ldp x13, x12, [x29, #-128]
+ ldp x14, x15, [x29, #-160]
+ ldp x16, x17, [x29, #-144]
+ cmn x21, x14
+ adcs x21, x26, x15
+ adcs x14, x22, x16
+ adcs x24, x24, x17
+ adcs x26, x28, x13
+ adcs x27, x27, x12
+ adcs x25, x25, x11
+ adcs x10, x19, x10
+ stp x10, x14, [sp, #24]
+ adcs x23, x23, x9
+ ldr x9, [sp, #16] // 8-byte Folded Reload
+ adcs x28, x9, x8
+ ldr x8, [sp, #48] // 8-byte Folded Reload
+ adcs x22, x8, xzr
+ ldr x8, [sp, #56] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #56] // 8-byte Folded Spill
+ ldr x8, [sp, #64] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [sp, #88] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [sp, #96] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldr x8, [sp, #104] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #104] // 8-byte Folded Spill
+ adcs x8, xzr, xzr
+ str x8, [sp, #48] // 8-byte Folded Spill
+ ldr x19, [sp, #40] // 8-byte Folded Reload
+ mul x1, x21, x19
+ sub x8, x29, #240 // =240
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [x29, #-176]
+ ldp x11, x10, [x29, #-192]
+ ldp x13, x12, [x29, #-208]
+ ldp x14, x15, [x29, #-240]
+ ldp x16, x17, [x29, #-224]
+ cmn x21, x14
+ ldr x14, [sp, #32] // 8-byte Folded Reload
+ adcs x21, x14, x15
+ adcs x14, x24, x16
+ adcs x26, x26, x17
+ adcs x27, x27, x13
+ adcs x25, x25, x12
+ ldr x12, [sp, #24] // 8-byte Folded Reload
+ adcs x11, x12, x11
+ stp x11, x14, [sp, #24]
+ adcs x23, x23, x10
+ adcs x28, x28, x9
+ adcs x22, x22, x8
+ ldr x8, [sp, #56] // 8-byte Folded Reload
+ adcs x24, x8, xzr
+ ldr x8, [sp, #64] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #64] // 8-byte Folded Spill
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [sp, #88] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [sp, #96] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldr x8, [sp, #104] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldr x8, [sp, #48] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #56] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #672 // =672
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #744]
+ ldr x9, [sp, #736]
+ ldr x10, [sp, #728]
+ ldr x11, [sp, #720]
+ ldr x12, [sp, #712]
+ ldr x13, [sp, #704]
+ ldr x14, [sp, #672]
+ ldr x15, [sp, #680]
+ ldr x16, [sp, #688]
+ ldr x17, [sp, #696]
+ cmn x21, x14
+ ldr x14, [sp, #32] // 8-byte Folded Reload
+ adcs x21, x14, x15
+ adcs x14, x26, x16
+ str x14, [sp, #48] // 8-byte Folded Spill
+ adcs x27, x27, x17
+ adcs x25, x25, x13
+ ldr x13, [sp, #24] // 8-byte Folded Reload
+ adcs x12, x13, x12
+ str x12, [sp, #32] // 8-byte Folded Spill
+ adcs x23, x23, x11
+ adcs x28, x28, x10
+ adcs x22, x22, x9
+ adcs x24, x24, x8
+ ldr x8, [sp, #64] // 8-byte Folded Reload
+ adcs x26, x8, xzr
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #72] // 8-byte Folded Spill
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [sp, #88] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [sp, #96] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldr x8, [sp, #104] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldr x8, [sp, #56] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #64] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #592 // =592
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #664]
+ ldr x9, [sp, #656]
+ ldr x10, [sp, #648]
+ ldr x11, [sp, #640]
+ ldr x12, [sp, #632]
+ ldr x13, [sp, #624]
+ ldr x14, [sp, #592]
+ ldr x15, [sp, #600]
+ ldr x16, [sp, #608]
+ ldr x17, [sp, #616]
+ cmn x21, x14
+ ldr x14, [sp, #48] // 8-byte Folded Reload
+ adcs x21, x14, x15
+ adcs x14, x27, x16
+ str x14, [sp, #56] // 8-byte Folded Spill
+ adcs x25, x25, x17
+ ldr x14, [sp, #32] // 8-byte Folded Reload
+ adcs x13, x14, x13
+ str x13, [sp, #48] // 8-byte Folded Spill
+ adcs x23, x23, x12
+ adcs x28, x28, x11
+ adcs x22, x22, x10
+ adcs x24, x24, x9
+ adcs x26, x26, x8
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x27, x8, xzr
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ ldr x8, [sp, #88] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [sp, #96] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldr x8, [sp, #104] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldr x8, [sp, #64] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #72] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #512 // =512
+ mov x0, x20
+ bl .LmulPv576x64
+ ldr x8, [sp, #584]
+ ldr x9, [sp, #576]
+ ldr x10, [sp, #568]
+ ldr x11, [sp, #560]
+ ldr x12, [sp, #552]
+ ldr x13, [sp, #544]
+ ldr x14, [sp, #512]
+ ldr x15, [sp, #520]
+ ldr x16, [sp, #528]
+ ldr x17, [sp, #536]
+ cmn x21, x14
+ ldr x14, [sp, #56] // 8-byte Folded Reload
+ adcs x21, x14, x15
+ adcs x14, x25, x16
+ str x14, [sp, #64] // 8-byte Folded Spill
+ ldr x14, [sp, #48] // 8-byte Folded Reload
+ adcs x14, x14, x17
+ str x14, [sp, #56] // 8-byte Folded Spill
+ adcs x23, x23, x13
+ adcs x28, x28, x12
+ adcs x22, x22, x11
+ adcs x24, x24, x10
+ adcs x26, x26, x9
+ adcs x27, x27, x8
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x25, x8, xzr
+ ldr x8, [sp, #88] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [sp, #96] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldr x8, [sp, #104] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldr x8, [sp, #72] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #80] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #432 // =432
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #496]
+ ldp x11, x10, [sp, #480]
+ ldp x13, x12, [sp, #464]
+ ldp x14, x15, [sp, #432]
+ ldp x16, x17, [sp, #448]
+ cmn x21, x14
+ ldr x14, [sp, #64] // 8-byte Folded Reload
+ adcs x21, x14, x15
+ ldr x14, [sp, #56] // 8-byte Folded Reload
+ adcs x14, x14, x16
+ adcs x23, x23, x17
+ adcs x28, x28, x13
+ adcs x22, x22, x12
+ adcs x24, x24, x11
+ adcs x26, x26, x10
+ adcs x27, x27, x9
+ adcs x25, x25, x8
+ ldr x8, [sp, #88] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [sp, #96] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #96] // 8-byte Folded Spill
+ ldr x8, [sp, #104] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ stp x14, x8, [sp, #72]
+ mul x1, x21, x19
+ add x8, sp, #352 // =352
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #416]
+ ldp x11, x10, [sp, #400]
+ ldp x13, x12, [sp, #384]
+ ldp x14, x15, [sp, #352]
+ ldp x16, x17, [sp, #368]
+ cmn x21, x14
+ ldr x14, [sp, #72] // 8-byte Folded Reload
+ adcs x21, x14, x15
+ adcs x14, x23, x16
+ str x14, [sp, #72] // 8-byte Folded Spill
+ adcs x28, x28, x17
+ adcs x22, x22, x13
+ adcs x24, x24, x12
+ adcs x26, x26, x11
+ adcs x27, x27, x10
+ adcs x25, x25, x9
+ ldr x9, [sp, #88] // 8-byte Folded Reload
+ adcs x8, x9, x8
+ str x8, [sp, #88] // 8-byte Folded Spill
+ ldr x8, [sp, #96] // 8-byte Folded Reload
+ adcs x23, x8, xzr
+ ldr x8, [sp, #104] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #104] // 8-byte Folded Spill
+ ldr x8, [sp, #80] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #96] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #272 // =272
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #336]
+ ldp x11, x10, [sp, #320]
+ ldp x13, x12, [sp, #304]
+ ldp x14, x15, [sp, #272]
+ ldp x16, x17, [sp, #288]
+ cmn x21, x14
+ ldr x14, [sp, #72] // 8-byte Folded Reload
+ adcs x21, x14, x15
+ adcs x14, x28, x16
+ adcs x22, x22, x17
+ adcs x24, x24, x13
+ adcs x26, x26, x12
+ adcs x27, x27, x11
+ adcs x25, x25, x10
+ ldr x10, [sp, #88] // 8-byte Folded Reload
+ adcs x9, x10, x9
+ stp x14, x9, [sp, #80]
+ adcs x23, x23, x8
+ ldr x8, [sp, #104] // 8-byte Folded Reload
+ adcs x28, x8, xzr
+ ldr x8, [sp, #96] // 8-byte Folded Reload
+ adcs x8, x8, xzr
+ str x8, [sp, #104] // 8-byte Folded Spill
+ mul x1, x21, x19
+ add x8, sp, #192 // =192
+ mov x0, x20
+ bl .LmulPv576x64
+ ldp x9, x8, [sp, #256]
+ ldp x11, x10, [sp, #240]
+ ldp x13, x12, [sp, #224]
+ ldp x14, x15, [sp, #192]
+ ldp x16, x17, [sp, #208]
+ cmn x21, x14
+ ldr x14, [sp, #80] // 8-byte Folded Reload
+ adcs x14, x14, x15
+ adcs x15, x22, x16
+ adcs x16, x24, x17
+ adcs x13, x26, x13
+ adcs x12, x27, x12
+ adcs x11, x25, x11
+ ldr x17, [sp, #88] // 8-byte Folded Reload
+ adcs x10, x17, x10
+ adcs x9, x23, x9
+ adcs x8, x28, x8
+ ldp x17, x18, [sp, #104]
+ adcs x17, x17, xzr
+ subs x18, x14, x18
+ ldp x0, x1, [sp, #120]
+ sbcs x0, x15, x0
+ sbcs x1, x16, x1
+ ldp x2, x3, [sp, #136]
+ sbcs x2, x13, x2
+ sbcs x3, x12, x3
+ ldr x4, [sp, #152] // 8-byte Folded Reload
+ sbcs x4, x11, x4
+ ldp x5, x6, [sp, #168]
+ sbcs x5, x10, x5
+ sbcs x6, x9, x6
+ ldr x7, [sp, #184] // 8-byte Folded Reload
+ sbcs x7, x8, x7
+ sbcs x17, x17, xzr
+ tst x17, #0x1
+ csel x14, x14, x18, ne
+ csel x15, x15, x0, ne
+ csel x16, x16, x1, ne
+ csel x13, x13, x2, ne
+ csel x12, x12, x3, ne
+ csel x11, x11, x4, ne
+ csel x10, x10, x5, ne
+ csel x9, x9, x6, ne
+ csel x8, x8, x7, ne
+ ldr x17, [sp, #160] // 8-byte Folded Reload
+ stp x14, x15, [x17]
+ stp x16, x13, [x17, #16]
+ stp x12, x11, [x17, #32]
+ stp x10, x9, [x17, #48]
+ str x8, [x17, #64]
+ sub sp, x29, #80 // =80
+ ldp x29, x30, [sp, #80]
+ ldp x20, x19, [sp, #64]
+ ldp x22, x21, [sp, #48]
+ ldp x24, x23, [sp, #32]
+ ldp x26, x25, [sp, #16]
+ ldp x28, x27, [sp], #96
+ ret
+.Lfunc_end132:
+ .size mcl_fp_montRed9L, .Lfunc_end132-mcl_fp_montRed9L
+
+ .globl mcl_fp_addPre9L
+ .align 2
+ .type mcl_fp_addPre9L,@function
+mcl_fp_addPre9L: // @mcl_fp_addPre9L
+// BB#0:
+ ldp x11, x8, [x2, #56]
+ ldp x13, x9, [x1, #56]
+ ldp x15, x10, [x2, #40]
+ ldp x17, x12, [x1, #40]
+ ldp x3, x14, [x2, #24]
+ ldr x4, [x2]
+ ldp x2, x18, [x2, #8]
+ ldp x5, x6, [x1]
+ ldr x7, [x1, #16]
+ ldp x1, x16, [x1, #24]
+ adds x4, x4, x5
+ adcs x2, x2, x6
+ stp x4, x2, [x0]
+ adcs x18, x18, x7
+ str x18, [x0, #16]
+ adcs x18, x3, x1
+ adcs x14, x14, x16
+ stp x18, x14, [x0, #24]
+ adcs x14, x15, x17
+ adcs x10, x10, x12
+ stp x14, x10, [x0, #40]
+ adcs x10, x11, x13
+ adcs x9, x8, x9
+ adcs x8, xzr, xzr
+ stp x10, x9, [x0, #56]
+ mov x0, x8
+ ret
+.Lfunc_end133:
+ .size mcl_fp_addPre9L, .Lfunc_end133-mcl_fp_addPre9L
+
+ .globl mcl_fp_subPre9L
+ .align 2
+ .type mcl_fp_subPre9L,@function
+mcl_fp_subPre9L: // @mcl_fp_subPre9L
+// BB#0:
+ ldp x11, x8, [x2, #56]
+ ldp x13, x9, [x1, #56]
+ ldp x15, x10, [x2, #40]
+ ldp x17, x12, [x1, #40]
+ ldp x3, x14, [x2, #24]
+ ldr x4, [x2]
+ ldp x2, x18, [x2, #8]
+ ldp x5, x6, [x1]
+ ldr x7, [x1, #16]
+ ldp x1, x16, [x1, #24]
+ subs x4, x5, x4
+ sbcs x2, x6, x2
+ stp x4, x2, [x0]
+ sbcs x18, x7, x18
+ str x18, [x0, #16]
+ sbcs x18, x1, x3
+ sbcs x14, x16, x14
+ stp x18, x14, [x0, #24]
+ sbcs x14, x17, x15
+ sbcs x10, x12, x10
+ stp x14, x10, [x0, #40]
+ sbcs x10, x13, x11
+ sbcs x9, x9, x8
+ ngcs x8, xzr
+ and x8, x8, #0x1
+ stp x10, x9, [x0, #56]
+ mov x0, x8
+ ret
+.Lfunc_end134:
+ .size mcl_fp_subPre9L, .Lfunc_end134-mcl_fp_subPre9L
+
+ .globl mcl_fp_shr1_9L
+ .align 2
+ .type mcl_fp_shr1_9L,@function
+mcl_fp_shr1_9L: // @mcl_fp_shr1_9L
+// BB#0:
+ ldp x8, x9, [x1]
+ ldp x12, x10, [x1, #56]
+ ldp x16, x11, [x1, #40]
+ ldp x13, x14, [x1, #16]
+ ldr x15, [x1, #32]
+ extr x8, x9, x8, #1
+ extr x9, x13, x9, #1
+ extr x13, x14, x13, #1
+ extr x14, x15, x14, #1
+ extr x15, x16, x15, #1
+ extr x16, x11, x16, #1
+ extr x11, x12, x11, #1
+ extr x12, x10, x12, #1
+ lsr x10, x10, #1
+ stp x8, x9, [x0]
+ stp x13, x14, [x0, #16]
+ stp x15, x16, [x0, #32]
+ stp x11, x12, [x0, #48]
+ str x10, [x0, #64]
+ ret
+.Lfunc_end135:
+ .size mcl_fp_shr1_9L, .Lfunc_end135-mcl_fp_shr1_9L
+
+ .globl mcl_fp_add9L
+ .align 2
+ .type mcl_fp_add9L,@function
+mcl_fp_add9L: // @mcl_fp_add9L
+// BB#0:
+ stp x24, x23, [sp, #-48]!
+ stp x22, x21, [sp, #16]
+ stp x20, x19, [sp, #32]
+ ldp x11, x8, [x2, #56]
+ ldp x13, x9, [x1, #56]
+ ldp x15, x10, [x2, #40]
+ ldp x17, x12, [x1, #40]
+ ldp x4, x14, [x2, #24]
+ ldr x5, [x2]
+ ldp x2, x18, [x2, #8]
+ ldp x6, x7, [x1]
+ ldr x19, [x1, #16]
+ ldp x1, x16, [x1, #24]
+ adds x5, x5, x6
+ adcs x2, x2, x7
+ adcs x18, x18, x19
+ ldp x21, x7, [x3, #40]
+ ldp x19, x6, [x3, #56]
+ adcs x1, x4, x1
+ adcs x4, x14, x16
+ ldr x20, [x3, #32]
+ adcs x17, x15, x17
+ adcs x10, x10, x12
+ ldp x12, x14, [x3]
+ stp x5, x2, [x0]
+ stp x18, x1, [x0, #16]
+ stp x4, x17, [x0, #32]
+ adcs x22, x11, x13
+ stp x10, x22, [x0, #48]
+ adcs x8, x8, x9
+ str x8, [x0, #64]
+ adcs x23, xzr, xzr
+ ldp x9, x11, [x3, #16]
+ subs x16, x5, x12
+ sbcs x15, x2, x14
+ sbcs x14, x18, x9
+ sbcs x13, x1, x11
+ sbcs x12, x4, x20
+ sbcs x11, x17, x21
+ sbcs x10, x10, x7
+ sbcs x9, x22, x19
+ sbcs x8, x8, x6
+ sbcs x17, x23, xzr
+ and w17, w17, #0x1
+ tbnz w17, #0, .LBB136_2
+// BB#1: // %nocarry
+ stp x16, x15, [x0]
+ stp x14, x13, [x0, #16]
+ stp x12, x11, [x0, #32]
+ stp x10, x9, [x0, #48]
+ str x8, [x0, #64]
+.LBB136_2: // %carry
+ ldp x20, x19, [sp, #32]
+ ldp x22, x21, [sp, #16]
+ ldp x24, x23, [sp], #48
+ ret
+.Lfunc_end136:
+ .size mcl_fp_add9L, .Lfunc_end136-mcl_fp_add9L
+
+ .globl mcl_fp_addNF9L
+ .align 2
+ .type mcl_fp_addNF9L,@function
+mcl_fp_addNF9L: // @mcl_fp_addNF9L
+// BB#0:
+ stp x20, x19, [sp, #-16]!
+ ldp x11, x8, [x1, #56]
+ ldp x13, x9, [x2, #56]
+ ldp x15, x10, [x1, #40]
+ ldp x17, x12, [x2, #40]
+ ldp x4, x14, [x1, #24]
+ ldr x5, [x1]
+ ldp x1, x18, [x1, #8]
+ ldp x6, x7, [x2]
+ ldr x19, [x2, #16]
+ ldp x2, x16, [x2, #24]
+ adds x5, x6, x5
+ adcs x1, x7, x1
+ adcs x18, x19, x18
+ ldp x19, x6, [x3, #56]
+ adcs x2, x2, x4
+ adcs x14, x16, x14
+ ldp x4, x7, [x3, #40]
+ adcs x15, x17, x15
+ adcs x10, x12, x10
+ ldp x12, x17, [x3]
+ adcs x11, x13, x11
+ ldr x13, [x3, #16]
+ ldp x3, x16, [x3, #24]
+ adcs x8, x9, x8
+ subs x9, x5, x12
+ sbcs x12, x1, x17
+ sbcs x13, x18, x13
+ sbcs x17, x2, x3
+ sbcs x16, x14, x16
+ sbcs x3, x15, x4
+ sbcs x4, x10, x7
+ sbcs x7, x11, x19
+ sbcs x6, x8, x6
+ asr x19, x6, #63
+ cmp x19, #0 // =0
+ csel x9, x5, x9, lt
+ csel x12, x1, x12, lt
+ csel x13, x18, x13, lt
+ csel x17, x2, x17, lt
+ csel x14, x14, x16, lt
+ csel x15, x15, x3, lt
+ csel x10, x10, x4, lt
+ csel x11, x11, x7, lt
+ csel x8, x8, x6, lt
+ stp x9, x12, [x0]
+ stp x13, x17, [x0, #16]
+ stp x14, x15, [x0, #32]
+ stp x10, x11, [x0, #48]
+ str x8, [x0, #64]
+ ldp x20, x19, [sp], #16
+ ret
+.Lfunc_end137:
+ .size mcl_fp_addNF9L, .Lfunc_end137-mcl_fp_addNF9L
+
+ .globl mcl_fp_sub9L
+ .align 2
+ .type mcl_fp_sub9L,@function
+mcl_fp_sub9L: // @mcl_fp_sub9L
+// BB#0:
+ stp x20, x19, [sp, #-16]!
+ ldp x15, x16, [x2, #56]
+ ldp x4, x17, [x1, #56]
+ ldp x13, x14, [x2, #40]
+ ldp x6, x18, [x1, #40]
+ ldp x11, x12, [x2, #24]
+ ldp x9, x10, [x2, #8]
+ ldr x8, [x2]
+ ldp x2, x7, [x1]
+ ldr x19, [x1, #16]
+ ldp x1, x5, [x1, #24]
+ subs x8, x2, x8
+ sbcs x9, x7, x9
+ stp x8, x9, [x0]
+ sbcs x10, x19, x10
+ sbcs x11, x1, x11
+ stp x10, x11, [x0, #16]
+ sbcs x12, x5, x12
+ sbcs x13, x6, x13
+ stp x12, x13, [x0, #32]
+ sbcs x14, x18, x14
+ sbcs x15, x4, x15
+ stp x14, x15, [x0, #48]
+ sbcs x16, x17, x16
+ str x16, [x0, #64]
+ ngcs x17, xzr
+ and w17, w17, #0x1
+ tbnz w17, #0, .LBB138_2
+// BB#1: // %nocarry
+ ldp x20, x19, [sp], #16
+ ret
+.LBB138_2: // %carry
+ ldp x18, x1, [x3]
+ ldp x2, x4, [x3, #16]
+ ldp x5, x6, [x3, #32]
+ adds x8, x18, x8
+ adcs x9, x1, x9
+ ldr x18, [x3, #48]
+ ldp x1, x17, [x3, #56]
+ adcs x10, x2, x10
+ adcs x11, x4, x11
+ adcs x12, x5, x12
+ adcs x13, x6, x13
+ adcs x14, x18, x14
+ adcs x15, x1, x15
+ adcs x16, x17, x16
+ stp x8, x9, [x0]
+ stp x10, x11, [x0, #16]
+ stp x12, x13, [x0, #32]
+ stp x14, x15, [x0, #48]
+ str x16, [x0, #64]
+ ldp x20, x19, [sp], #16
+ ret
+.Lfunc_end138:
+ .size mcl_fp_sub9L, .Lfunc_end138-mcl_fp_sub9L
+
+ .globl mcl_fp_subNF9L
+ .align 2
+ .type mcl_fp_subNF9L,@function
+mcl_fp_subNF9L: // @mcl_fp_subNF9L
+// BB#0:
+ stp x20, x19, [sp, #-16]!
+ ldp x11, x8, [x2, #56]
+ ldp x13, x9, [x1, #56]
+ ldp x15, x10, [x2, #40]
+ ldp x17, x12, [x1, #40]
+ ldp x4, x14, [x2, #24]
+ ldr x5, [x2]
+ ldp x2, x18, [x2, #8]
+ ldp x6, x7, [x1]
+ ldr x19, [x1, #16]
+ ldp x1, x16, [x1, #24]
+ subs x5, x6, x5
+ sbcs x2, x7, x2
+ sbcs x18, x19, x18
+ ldp x19, x6, [x3, #56]
+ sbcs x1, x1, x4
+ sbcs x14, x16, x14
+ ldp x4, x7, [x3, #40]
+ sbcs x15, x17, x15
+ sbcs x10, x12, x10
+ ldp x12, x17, [x3]
+ sbcs x11, x13, x11
+ sbcs x8, x9, x8
+ asr x9, x8, #63
+ extr x13, x9, x8, #63
+ and x12, x13, x12
+ ldr x13, [x3, #16]
+ ldp x3, x16, [x3, #24]
+ and x19, x9, x19
+ and x6, x9, x6
+ ror x9, x9, #63
+ and x17, x9, x17
+ and x13, x9, x13
+ and x3, x9, x3
+ and x16, x9, x16
+ and x4, x9, x4
+ and x9, x9, x7
+ adds x12, x12, x5
+ str x12, [x0]
+ adcs x12, x17, x2
+ str x12, [x0, #8]
+ adcs x12, x13, x18
+ str x12, [x0, #16]
+ adcs x12, x3, x1
+ str x12, [x0, #24]
+ adcs x12, x16, x14
+ str x12, [x0, #32]
+ adcs x12, x4, x15
+ adcs x9, x9, x10
+ stp x12, x9, [x0, #40]
+ adcs x9, x19, x11
+ adcs x8, x6, x8
+ stp x9, x8, [x0, #56]
+ ldp x20, x19, [sp], #16
+ ret
+.Lfunc_end139:
+ .size mcl_fp_subNF9L, .Lfunc_end139-mcl_fp_subNF9L
+
+ .globl mcl_fpDbl_add9L
+ .align 2
+ .type mcl_fpDbl_add9L,@function
+mcl_fpDbl_add9L: // @mcl_fpDbl_add9L
+// BB#0:
+ stp x20, x19, [sp, #-16]!
+ ldp x10, x8, [x2, #128]
+ ldp x11, x9, [x1, #128]
+ ldp x12, x13, [x2, #112]
+ ldp x14, x15, [x1, #112]
+ ldp x16, x17, [x2, #96]
+ ldp x18, x4, [x2]
+ ldp x5, x6, [x1]
+ ldp x7, x19, [x2, #16]
+ adds x18, x18, x5
+ adcs x4, x4, x6
+ ldp x5, x6, [x1, #16]
+ str x18, [x0]
+ adcs x18, x7, x5
+ ldp x5, x7, [x1, #96]
+ str x4, [x0, #8]
+ ldr x4, [x1, #32]
+ str x18, [x0, #16]
+ adcs x18, x19, x6
+ ldp x6, x19, [x2, #32]
+ str x18, [x0, #24]
+ adcs x4, x6, x4
+ ldp x18, x6, [x1, #40]
+ str x4, [x0, #32]
+ adcs x18, x19, x18
+ ldp x4, x19, [x2, #48]
+ str x18, [x0, #40]
+ adcs x4, x4, x6
+ ldp x18, x6, [x1, #56]
+ str x4, [x0, #48]
+ adcs x18, x19, x18
+ ldp x4, x19, [x2, #64]
+ str x18, [x0, #56]
+ ldr x18, [x1, #72]
+ adcs x4, x4, x6
+ ldp x6, x2, [x2, #80]
+ str x4, [x0, #64]
+ ldp x4, x1, [x1, #80]
+ adcs x18, x19, x18
+ adcs x4, x6, x4
+ adcs x1, x2, x1
+ ldp x6, x19, [x3, #56]
+ adcs x16, x16, x5
+ adcs x17, x17, x7
+ ldp x7, x2, [x3, #40]
+ adcs x12, x12, x14
+ adcs x13, x13, x15
+ ldp x15, x5, [x3, #24]
+ adcs x10, x10, x11
+ ldr x11, [x3]
+ ldp x3, x14, [x3, #8]
+ adcs x8, x8, x9
+ adcs x9, xzr, xzr
+ subs x11, x18, x11
+ sbcs x3, x4, x3
+ sbcs x14, x1, x14
+ sbcs x15, x16, x15
+ sbcs x5, x17, x5
+ sbcs x7, x12, x7
+ sbcs x2, x13, x2
+ sbcs x6, x10, x6
+ sbcs x19, x8, x19
+ sbcs x9, x9, xzr
+ tst x9, #0x1
+ csel x9, x18, x11, ne
+ csel x11, x4, x3, ne
+ csel x14, x1, x14, ne
+ csel x15, x16, x15, ne
+ csel x16, x17, x5, ne
+ csel x12, x12, x7, ne
+ csel x13, x13, x2, ne
+ csel x10, x10, x6, ne
+ csel x8, x8, x19, ne
+ stp x9, x11, [x0, #72]
+ stp x14, x15, [x0, #88]
+ stp x16, x12, [x0, #104]
+ stp x13, x10, [x0, #120]
+ str x8, [x0, #136]
+ ldp x20, x19, [sp], #16
+ ret
+.Lfunc_end140:
+ .size mcl_fpDbl_add9L, .Lfunc_end140-mcl_fpDbl_add9L
+
+ .globl mcl_fpDbl_sub9L
+ .align 2
+ .type mcl_fpDbl_sub9L,@function
+mcl_fpDbl_sub9L: // @mcl_fpDbl_sub9L
+// BB#0:
+ ldp x10, x8, [x2, #128]
+ ldp x11, x9, [x1, #128]
+ ldp x14, x12, [x2, #112]
+ ldp x15, x13, [x1, #112]
+ ldp x16, x17, [x2]
+ ldp x18, x4, [x1]
+ ldp x5, x6, [x2, #96]
+ ldr x7, [x1, #16]
+ subs x16, x18, x16
+ sbcs x17, x4, x17
+ ldp x18, x4, [x2, #16]
+ str x16, [x0]
+ ldr x16, [x1, #24]
+ sbcs x18, x7, x18
+ str x17, [x0, #8]
+ ldp x17, x7, [x2, #32]
+ str x18, [x0, #16]
+ sbcs x16, x16, x4
+ ldp x18, x4, [x1, #32]
+ str x16, [x0, #24]
+ sbcs x16, x18, x17
+ ldp x17, x18, [x2, #48]
+ str x16, [x0, #32]
+ sbcs x4, x4, x7
+ ldp x16, x7, [x1, #48]
+ str x4, [x0, #40]
+ sbcs x16, x16, x17
+ ldp x17, x4, [x2, #80]
+ str x16, [x0, #48]
+ ldr x16, [x1, #64]
+ sbcs x18, x7, x18
+ ldp x7, x2, [x2, #64]
+ str x18, [x0, #56]
+ ldr x18, [x1, #72]
+ sbcs x16, x16, x7
+ str x16, [x0, #64]
+ ldp x16, x7, [x1, #80]
+ sbcs x18, x18, x2
+ ldp x2, x1, [x1, #96]
+ sbcs x16, x16, x17
+ sbcs x4, x7, x4
+ sbcs x2, x2, x5
+ ldp x7, x17, [x3, #56]
+ sbcs x1, x1, x6
+ sbcs x14, x15, x14
+ ldp x6, x5, [x3, #40]
+ sbcs x12, x13, x12
+ sbcs x10, x11, x10
+ ldp x13, x15, [x3, #24]
+ sbcs x8, x9, x8
+ ngcs x9, xzr
+ tst x9, #0x1
+ ldr x9, [x3]
+ ldp x3, x11, [x3, #8]
+ csel x17, x17, xzr, ne
+ csel x7, x7, xzr, ne
+ csel x5, x5, xzr, ne
+ csel x6, x6, xzr, ne
+ csel x15, x15, xzr, ne
+ csel x13, x13, xzr, ne
+ csel x11, x11, xzr, ne
+ csel x3, x3, xzr, ne
+ csel x9, x9, xzr, ne
+ adds x9, x9, x18
+ str x9, [x0, #72]
+ adcs x9, x3, x16
+ str x9, [x0, #80]
+ adcs x9, x11, x4
+ str x9, [x0, #88]
+ adcs x9, x13, x2
+ str x9, [x0, #96]
+ adcs x9, x15, x1
+ str x9, [x0, #104]
+ adcs x9, x6, x14
+ str x9, [x0, #112]
+ adcs x9, x5, x12
+ str x9, [x0, #120]
+ adcs x9, x7, x10
+ adcs x8, x17, x8
+ stp x9, x8, [x0, #128]
+ ret
+.Lfunc_end141:
+ .size mcl_fpDbl_sub9L, .Lfunc_end141-mcl_fpDbl_sub9L
+
+
+ .section ".note.GNU-stack","",@progbits
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/arm.s b/vendor/github.com/tangerine-network/mcl/src/asm/arm.s
new file mode 100644
index 000000000..2df9bfb92
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/arm.s
@@ -0,0 +1,84189 @@
+ .text
+ .syntax unified
+ .eabi_attribute 67, "2.09" @ Tag_conformance
+ .eabi_attribute 6, 1 @ Tag_CPU_arch
+ .eabi_attribute 8, 1 @ Tag_ARM_ISA_use
+ .eabi_attribute 15, 1 @ Tag_ABI_PCS_RW_data
+ .eabi_attribute 16, 1 @ Tag_ABI_PCS_RO_data
+ .eabi_attribute 17, 2 @ Tag_ABI_PCS_GOT_use
+ .eabi_attribute 20, 1 @ Tag_ABI_FP_denormal
+ .eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions
+ .eabi_attribute 23, 3 @ Tag_ABI_FP_number_model
+ .eabi_attribute 34, 1 @ Tag_CPU_unaligned_access
+ .eabi_attribute 24, 1 @ Tag_ABI_align_needed
+ .eabi_attribute 25, 1 @ Tag_ABI_align_preserved
+ .eabi_attribute 28, 1 @ Tag_ABI_VFP_args
+ .eabi_attribute 38, 1 @ Tag_ABI_FP_16bit_format
+ .eabi_attribute 14, 0 @ Tag_ABI_PCS_R9_use
+ .file "<stdin>"
+ .globl makeNIST_P192L
+ .align 2
+ .type makeNIST_P192L,%function
+makeNIST_P192L: @ @makeNIST_P192L
+ .fnstart
+@ BB#0:
+ mvn r1, #0
+ mvn r2, #1
+ str r1, [r0]
+ stmib r0, {r1, r2}
+ str r1, [r0, #12]
+ str r1, [r0, #16]
+ str r1, [r0, #20]
+ mov pc, lr
+.Lfunc_end0:
+ .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mod_NIST_P192L
+ .align 2
+ .type mcl_fpDbl_mod_NIST_P192L,%function
+mcl_fpDbl_mod_NIST_P192L: @ @mcl_fpDbl_mod_NIST_P192L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #8
+ sub sp, sp, #8
+ add lr, r1, #24
+ ldr r2, [r1, #40]
+ ldr r3, [r1, #44]
+ ldr r7, [r1, #16]
+ ldr r8, [r1, #20]
+ ldm lr, {r4, r5, r6, lr}
+ ldm r1, {r1, r9, r10, r12}
+ adds r11, r4, r1
+ adcs r9, r5, r9
+ adcs r10, r6, r10
+ adcs r1, lr, r12
+ str r1, [sp, #4] @ 4-byte Spill
+ adcs r1, r2, r7
+ mov r7, #0
+ str r1, [sp] @ 4-byte Spill
+ adcs r8, r3, r8
+ mov r1, #0
+ adcs r1, r1, #0
+ adc r12, r7, #0
+ ldr r7, [sp, #4] @ 4-byte Reload
+ adds r11, r11, r2
+ adcs r9, r9, r3
+ adcs r4, r10, r4
+ adcs r5, r7, r5
+ ldr r7, [sp] @ 4-byte Reload
+ adcs r6, r7, r6
+ adcs r7, r8, lr
+ adcs r1, r1, #0
+ adc r12, r12, #0
+ adds lr, r4, r2
+ adcs r3, r5, r3
+ adcs r6, r6, #0
+ adcs r7, r7, #0
+ adcs r1, r1, #0
+ adc r5, r12, #0
+ adds r12, r1, r11
+ adcs r11, r5, r9
+ adcs r10, r1, lr
+ mov r1, #0
+ adcs r8, r5, r3
+ adcs lr, r6, #0
+ adcs r2, r7, #0
+ adc r9, r1, #0
+ adds r7, r12, #1
+ str r2, [sp, #4] @ 4-byte Spill
+ adcs r6, r11, #0
+ adcs r3, r10, #1
+ adcs r5, r8, #0
+ adcs r1, lr, #0
+ adcs r2, r2, #0
+ sbc r4, r9, #0
+ ands r4, r4, #1
+ movne r7, r12
+ movne r6, r11
+ movne r3, r10
+ cmp r4, #0
+ movne r5, r8
+ movne r1, lr
+ str r7, [r0]
+ str r6, [r0, #4]
+ str r3, [r0, #8]
+ str r5, [r0, #12]
+ str r1, [r0, #16]
+ ldr r1, [sp, #4] @ 4-byte Reload
+ movne r2, r1
+ str r2, [r0, #20]
+ add sp, sp, #8
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end1:
+ .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sqr_NIST_P192L
+ .align 2
+ .type mcl_fp_sqr_NIST_P192L,%function
+mcl_fp_sqr_NIST_P192L: @ @mcl_fp_sqr_NIST_P192L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #60
+ sub sp, sp, #60
+ mov r8, r0
+ add r0, sp, #12
+ bl mcl_fpDbl_sqrPre6L(PLT)
+ add r12, sp, #12
+ ldr lr, [sp, #48]
+ ldr r2, [sp, #44]
+ ldr r3, [sp, #40]
+ mov r4, #0
+ ldm r12, {r0, r1, r5, r6, r12}
+ ldr r7, [sp, #36]
+ adds r0, r7, r0
+ str r0, [sp, #8] @ 4-byte Spill
+ adcs r0, r3, r1
+ mov r1, #0
+ adcs r10, r2, r5
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #52]
+ ldr r5, [sp, #32]
+ adcs r11, lr, r6
+ ldr r6, [sp, #56]
+ adcs r9, r0, r12
+ adcs r5, r6, r5
+ adcs r1, r1, #0
+ adc r12, r4, #0
+ ldr r4, [sp, #8] @ 4-byte Reload
+ adds r4, r4, r0
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [sp, #4] @ 4-byte Reload
+ adcs r4, r4, r6
+ adcs r7, r10, r7
+ adcs r3, r11, r3
+ adcs r2, r9, r2
+ adcs r5, r5, lr
+ adcs r1, r1, #0
+ adc r12, r12, #0
+ adds lr, r7, r0
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r3, r3, r6
+ adcs r2, r2, #0
+ adcs r7, r5, #0
+ adcs r1, r1, #0
+ adc r6, r12, #0
+ adds r5, r1, r0
+ mov r0, #0
+ adcs r11, r6, r4
+ adcs r10, r1, lr
+ adcs r12, r6, r3
+ adcs lr, r2, #0
+ adcs r4, r7, #0
+ adc r9, r0, #0
+ adds r7, r5, #1
+ str r4, [sp, #8] @ 4-byte Spill
+ adcs r2, r11, #0
+ adcs r3, r10, #1
+ adcs r6, r12, #0
+ adcs r1, lr, #0
+ adcs r0, r4, #0
+ sbc r4, r9, #0
+ ands r4, r4, #1
+ movne r7, r5
+ movne r2, r11
+ movne r3, r10
+ cmp r4, #0
+ movne r6, r12
+ movne r1, lr
+ str r7, [r8]
+ str r2, [r8, #4]
+ str r3, [r8, #8]
+ str r6, [r8, #12]
+ str r1, [r8, #16]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ movne r0, r1
+ str r0, [r8, #20]
+ add sp, sp, #60
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end2:
+ .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulNIST_P192L
+ .align 2
+ .type mcl_fp_mulNIST_P192L,%function
+mcl_fp_mulNIST_P192L: @ @mcl_fp_mulNIST_P192L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #60
+ sub sp, sp, #60
+ mov r8, r0
+ add r0, sp, #12
+ bl mcl_fpDbl_mulPre6L(PLT)
+ add r12, sp, #12
+ ldr lr, [sp, #48]
+ ldr r2, [sp, #44]
+ ldr r3, [sp, #40]
+ mov r4, #0
+ ldm r12, {r0, r1, r5, r6, r12}
+ ldr r7, [sp, #36]
+ adds r0, r7, r0
+ str r0, [sp, #8] @ 4-byte Spill
+ adcs r0, r3, r1
+ mov r1, #0
+ adcs r10, r2, r5
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #52]
+ ldr r5, [sp, #32]
+ adcs r11, lr, r6
+ ldr r6, [sp, #56]
+ adcs r9, r0, r12
+ adcs r5, r6, r5
+ adcs r1, r1, #0
+ adc r12, r4, #0
+ ldr r4, [sp, #8] @ 4-byte Reload
+ adds r4, r4, r0
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [sp, #4] @ 4-byte Reload
+ adcs r4, r4, r6
+ adcs r7, r10, r7
+ adcs r3, r11, r3
+ adcs r2, r9, r2
+ adcs r5, r5, lr
+ adcs r1, r1, #0
+ adc r12, r12, #0
+ adds lr, r7, r0
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r3, r3, r6
+ adcs r2, r2, #0
+ adcs r7, r5, #0
+ adcs r1, r1, #0
+ adc r6, r12, #0
+ adds r5, r1, r0
+ mov r0, #0
+ adcs r11, r6, r4
+ adcs r10, r1, lr
+ adcs r12, r6, r3
+ adcs lr, r2, #0
+ adcs r4, r7, #0
+ adc r9, r0, #0
+ adds r7, r5, #1
+ str r4, [sp, #8] @ 4-byte Spill
+ adcs r2, r11, #0
+ adcs r3, r10, #1
+ adcs r6, r12, #0
+ adcs r1, lr, #0
+ adcs r0, r4, #0
+ sbc r4, r9, #0
+ ands r4, r4, #1
+ movne r7, r5
+ movne r2, r11
+ movne r3, r10
+ cmp r4, #0
+ movne r6, r12
+ movne r1, lr
+ str r7, [r8]
+ str r2, [r8, #4]
+ str r3, [r8, #8]
+ str r6, [r8, #12]
+ str r1, [r8, #16]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ movne r0, r1
+ str r0, [r8, #20]
+ add sp, sp, #60
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end3:
+ .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mod_NIST_P521L
+ .align 2
+ .type mcl_fpDbl_mod_NIST_P521L,%function
+mcl_fpDbl_mod_NIST_P521L: @ @mcl_fpDbl_mod_NIST_P521L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ ldr r6, [r1, #64]
+ mov r5, #255
+ ldr r3, [r1, #72]
+ ldr r2, [r1, #76]
+ mov r9, r0
+ orr r5, r5, #256
+ and r5, r6, r5
+ lsr r6, r6, #9
+ lsr r7, r3, #9
+ str r5, [sp, #40] @ 4-byte Spill
+ ldr r5, [r1, #68]
+ orr r12, r7, r2, lsl #23
+ lsr r2, r2, #9
+ lsr r4, r5, #9
+ orr r6, r6, r5, lsl #23
+ ldr r5, [r1]
+ orr r3, r4, r3, lsl #23
+ ldmib r1, {r4, r7, lr}
+ adds r5, r6, r5
+ ldr r6, [r1, #36]
+ str r5, [sp, #36] @ 4-byte Spill
+ ldr r5, [r1, #80]
+ adcs r3, r3, r4
+ str r3, [sp, #32] @ 4-byte Spill
+ adcs r7, r12, r7
+ ldr r3, [r1, #84]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r1, #88]
+ orr r2, r2, r5, lsl #23
+ lsr r5, r5, #9
+ adcs r12, r2, lr
+ ldr r2, [r1, #16]
+ orr r4, r5, r3, lsl #23
+ lsr r3, r3, #9
+ orr r3, r3, r7, lsl #23
+ lsr r5, r7, #9
+ ldr r7, [r1, #40]
+ adcs r2, r4, r2
+ ldr r4, [r1, #24]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [r1, #20]
+ adcs r2, r3, r2
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ orr r3, r5, r2, lsl #23
+ ldr r5, [r1, #28]
+ lsr r2, r2, #9
+ adcs lr, r3, r4
+ ldr r3, [r1, #96]
+ ldr r4, [r1, #44]
+ orr r2, r2, r3, lsl #23
+ adcs r2, r2, r5
+ ldr r5, [r1, #32]
+ str r2, [sp, #16] @ 4-byte Spill
+ lsr r2, r3, #9
+ ldr r3, [r1, #100]
+ orr r2, r2, r3, lsl #23
+ adcs r2, r2, r5
+ ldr r5, [r1, #48]
+ str r2, [sp, #12] @ 4-byte Spill
+ lsr r2, r3, #9
+ ldr r3, [r1, #104]
+ orr r2, r2, r3, lsl #23
+ adcs r0, r2, r6
+ lsr r2, r3, #9
+ ldr r3, [r1, #108]
+ ldr r6, [r1, #52]
+ str r0, [sp, #8] @ 4-byte Spill
+ orr r2, r2, r3, lsl #23
+ adcs r7, r2, r7
+ lsr r2, r3, #9
+ ldr r3, [r1, #112]
+ orr r2, r2, r3, lsl #23
+ lsr r3, r3, #9
+ adcs r2, r2, r4
+ ldr r4, [r1, #116]
+ orr r3, r3, r4, lsl #23
+ lsr r4, r4, #9
+ adcs r3, r3, r5
+ ldr r5, [r1, #120]
+ orr r4, r4, r5, lsl #23
+ adcs r11, r4, r6
+ lsr r4, r5, #9
+ ldr r5, [r1, #124]
+ ldr r6, [r1, #56]
+ orr r4, r4, r5, lsl #23
+ adcs r10, r4, r6
+ lsr r4, r5, #9
+ ldr r5, [r1, #128]
+ ldr r1, [r1, #60]
+ orr r4, r4, r5, lsl #23
+ adcs r8, r4, r1
+ ldr r4, [sp, #40] @ 4-byte Reload
+ lsr r1, r5, #9
+ ldr r5, [sp, #36] @ 4-byte Reload
+ adc r1, r1, r4
+ mov r4, #1
+ and r4, r4, r1, lsr #9
+ adds r5, r4, r5
+ ldr r4, [sp, #32] @ 4-byte Reload
+ str r5, [sp, #40] @ 4-byte Spill
+ adcs r6, r4, #0
+ ldr r4, [sp, #28] @ 4-byte Reload
+ str r6, [sp, #36] @ 4-byte Spill
+ adcs r0, r4, #0
+ and r4, r6, r5
+ ldr r5, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ and r4, r4, r0
+ adcs r0, r12, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ and r6, r4, r0
+ adcs r0, r5, #0
+ and r4, r6, r0
+ ldr r6, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r6, #0
+ ldr r6, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ and r5, r4, r0
+ adcs r0, lr, #0
+ and r5, r5, r0
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs lr, r6, #0
+ and r6, r5, lr
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adcs r5, r5, #0
+ and r12, r6, r5
+ adcs r6, r0, #0
+ adcs r7, r7, #0
+ and r4, r12, r6
+ adcs r2, r2, #0
+ and r4, r4, r7
+ adcs r3, r3, #0
+ and r4, r4, r2
+ adcs r0, r11, #0
+ and r4, r4, r3
+ adcs r10, r10, #0
+ and r4, r4, r0
+ adcs r11, r8, #0
+ and r4, r4, r10
+ adc r8, r1, #0
+ ldr r1, .LCPI4_0
+ and r4, r4, r11
+ orr r1, r8, r1
+ and r1, r4, r1
+ cmn r1, #1
+ beq .LBB4_2
+@ BB#1: @ %nonzero
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r1, [r9]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r1, [r9, #4]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r1, [r9, #8]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r1, [r9, #12]
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r1, [r9, #16]
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r1, [r9, #20]
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r1, [r9, #24]
+ add r1, r9, #32
+ str lr, [r9, #28]
+ stm r1, {r5, r6, r7}
+ add r1, r9, #52
+ str r2, [r9, #44]
+ str r3, [r9, #48]
+ stm r1, {r0, r10, r11}
+ mov r1, #255
+ orr r1, r1, #256
+ and r1, r8, r1
+ str r1, [r9, #64]
+ b .LBB4_3
+.LBB4_2: @ %zero
+ mov r0, r9
+ mov r1, #0
+ mov r2, #68
+ bl memset(PLT)
+.LBB4_3: @ %zero
+ add sp, sp, #44
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+ .align 2
+@ BB#4:
+.LCPI4_0:
+ .long 4294966784 @ 0xfffffe00
+.Lfunc_end4:
+ .size mcl_fpDbl_mod_NIST_P521L, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre1L
+ .align 2
+ .type mcl_fp_mulUnitPre1L,%function
+mcl_fp_mulUnitPre1L: @ @mcl_fp_mulUnitPre1L
+ .fnstart
+@ BB#0:
+ ldr r1, [r1]
+ umull r3, r12, r1, r2
+ stm r0, {r3, r12}
+ mov pc, lr
+.Lfunc_end5:
+ .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre1L
+ .align 2
+ .type mcl_fpDbl_mulPre1L,%function
+mcl_fpDbl_mulPre1L: @ @mcl_fpDbl_mulPre1L
+ .fnstart
+@ BB#0:
+ ldr r1, [r1]
+ ldr r2, [r2]
+ umull r3, r12, r2, r1
+ stm r0, {r3, r12}
+ mov pc, lr
+.Lfunc_end6:
+ .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre1L
+ .align 2
+ .type mcl_fpDbl_sqrPre1L,%function
+mcl_fpDbl_sqrPre1L: @ @mcl_fpDbl_sqrPre1L
+ .fnstart
+@ BB#0:
+ ldr r1, [r1]
+ umull r2, r3, r1, r1
+ stm r0, {r2, r3}
+ mov pc, lr
+.Lfunc_end7:
+ .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont1L
+ .align 2
+ .type mcl_fp_mont1L,%function
+mcl_fp_mont1L: @ @mcl_fp_mont1L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, lr}
+ push {r4, r5, r6, lr}
+ ldr r12, [r2]
+ ldr r1, [r1]
+ mov r6, #0
+ umull lr, r2, r1, r12
+ ldr r12, [r3, #-4]
+ ldr r3, [r3]
+ mul r1, lr, r12
+ umull r12, r4, r1, r3
+ adds r5, r12, lr
+ adcs r5, r4, r2
+ umlal lr, r2, r1, r3
+ adc r6, r6, #0
+ subs r1, r2, r3
+ sbc r3, r6, #0
+ tst r3, #1
+ movne r1, r2
+ str r1, [r0]
+ pop {r4, r5, r6, lr}
+ mov pc, lr
+.Lfunc_end8:
+ .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF1L
+ .align 2
+ .type mcl_fp_montNF1L,%function
+mcl_fp_montNF1L: @ @mcl_fp_montNF1L
+ .fnstart
+@ BB#0:
+ .save {r11, lr}
+ push {r11, lr}
+ ldr r12, [r2]
+ ldr r1, [r1]
+ umull lr, r2, r1, r12
+ ldr r12, [r3, #-4]
+ ldr r3, [r3]
+ mul r1, lr, r12
+ umlal lr, r2, r1, r3
+ sub r1, r2, r3
+ cmp r1, #0
+ movge r2, r1
+ str r2, [r0]
+ pop {r11, lr}
+ mov pc, lr
+.Lfunc_end9:
+ .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed1L
+ .align 2
+ .type mcl_fp_montRed1L,%function
+mcl_fp_montRed1L: @ @mcl_fp_montRed1L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, lr}
+ push {r4, r5, r6, lr}
+ ldr r12, [r2, #-4]
+ ldr r3, [r1]
+ ldr r2, [r2]
+ ldr r1, [r1, #4]
+ mov r6, #0
+ mul lr, r3, r12
+ umull r12, r4, lr, r2
+ adds r5, r3, r12
+ adcs r5, r1, r4
+ umlal r3, r1, lr, r2
+ adc r6, r6, #0
+ subs r2, r1, r2
+ sbc r3, r6, #0
+ tst r3, #1
+ movne r2, r1
+ str r2, [r0]
+ pop {r4, r5, r6, lr}
+ mov pc, lr
+.Lfunc_end10:
+ .size mcl_fp_montRed1L, .Lfunc_end10-mcl_fp_montRed1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre1L
+ .align 2
+ .type mcl_fp_addPre1L,%function
+mcl_fp_addPre1L: @ @mcl_fp_addPre1L
+ .fnstart
+@ BB#0:
+ ldr r1, [r1]
+ ldr r2, [r2]
+ adds r1, r2, r1
+ str r1, [r0]
+ mov r0, #0
+ adc r0, r0, #0
+ mov pc, lr
+.Lfunc_end11:
+ .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subPre1L
+ .align 2
+ .type mcl_fp_subPre1L,%function
+mcl_fp_subPre1L: @ @mcl_fp_subPre1L
+ .fnstart
+@ BB#0:
+ ldr r2, [r2]
+ ldr r1, [r1]
+ subs r1, r1, r2
+ str r1, [r0]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ mov pc, lr
+.Lfunc_end12:
+ .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_1L
+ .align 2
+ .type mcl_fp_shr1_1L,%function
+mcl_fp_shr1_1L: @ @mcl_fp_shr1_1L
+ .fnstart
+@ BB#0:
+ ldr r1, [r1]
+ lsr r1, r1, #1
+ str r1, [r0]
+ mov pc, lr
+.Lfunc_end13:
+ .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add1L
+ .align 2
+ .type mcl_fp_add1L,%function
+mcl_fp_add1L: @ @mcl_fp_add1L
+ .fnstart
+@ BB#0:
+ ldr r1, [r1]
+ ldr r2, [r2]
+ ldr r3, [r3]
+ adds r1, r2, r1
+ mov r2, #0
+ str r1, [r0]
+ adc r2, r2, #0
+ subs r1, r1, r3
+ sbc r2, r2, #0
+ tst r2, #1
+ streq r1, [r0]
+ mov pc, lr
+.Lfunc_end14:
+ .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF1L
+ .align 2
+ .type mcl_fp_addNF1L,%function
+mcl_fp_addNF1L: @ @mcl_fp_addNF1L
+ .fnstart
+@ BB#0:
+ ldr r1, [r1]
+ ldr r2, [r2]
+ add r1, r2, r1
+ ldr r2, [r3]
+ sub r2, r1, r2
+ cmp r2, #0
+ movlt r2, r1
+ str r2, [r0]
+ mov pc, lr
+.Lfunc_end15:
+ .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sub1L
+ .align 2
+ .type mcl_fp_sub1L,%function
+mcl_fp_sub1L: @ @mcl_fp_sub1L
+ .fnstart
+@ BB#0:
+ ldr r2, [r2]
+ ldr r1, [r1]
+ subs r1, r1, r2
+ mov r2, #0
+ sbc r2, r2, #0
+ str r1, [r0]
+ tst r2, #1
+ ldrne r2, [r3]
+ addne r1, r2, r1
+ strne r1, [r0]
+ movne pc, lr
+ mov pc, lr
+.Lfunc_end16:
+ .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF1L
+ .align 2
+ .type mcl_fp_subNF1L,%function
+mcl_fp_subNF1L: @ @mcl_fp_subNF1L
+ .fnstart
+@ BB#0:
+ ldr r2, [r2]
+ ldr r1, [r1]
+ sub r1, r1, r2
+ ldr r2, [r3]
+ cmp r1, #0
+ addlt r1, r1, r2
+ str r1, [r0]
+ mov pc, lr
+.Lfunc_end17:
+ .size mcl_fp_subNF1L, .Lfunc_end17-mcl_fp_subNF1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add1L
+ .align 2
+ .type mcl_fpDbl_add1L,%function
+mcl_fpDbl_add1L: @ @mcl_fpDbl_add1L
+ .fnstart
+@ BB#0:
+ .save {r11, lr}
+ push {r11, lr}
+ ldm r1, {r12, lr}
+ ldm r2, {r1, r2}
+ ldr r3, [r3]
+ adds r1, r1, r12
+ str r1, [r0]
+ mov r1, #0
+ adcs r2, r2, lr
+ adc r1, r1, #0
+ subs r3, r2, r3
+ sbc r1, r1, #0
+ tst r1, #1
+ movne r3, r2
+ str r3, [r0, #4]
+ pop {r11, lr}
+ mov pc, lr
+.Lfunc_end18:
+ .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub1L
+ .align 2
+ .type mcl_fpDbl_sub1L,%function
+mcl_fpDbl_sub1L: @ @mcl_fpDbl_sub1L
+ .fnstart
+@ BB#0:
+ .save {r11, lr}
+ push {r11, lr}
+ ldm r2, {r12, lr}
+ ldr r2, [r1]
+ ldr r1, [r1, #4]
+ ldr r3, [r3]
+ subs r2, r2, r12
+ str r2, [r0]
+ mov r2, #0
+ sbcs r1, r1, lr
+ sbc r2, r2, #0
+ tst r2, #1
+ addne r1, r1, r3
+ str r1, [r0, #4]
+ pop {r11, lr}
+ mov pc, lr
+.Lfunc_end19:
+ .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre2L
+ .align 2
+ .type mcl_fp_mulUnitPre2L,%function
+mcl_fp_mulUnitPre2L: @ @mcl_fp_mulUnitPre2L
+ .fnstart
+@ BB#0:
+ .save {r11, lr}
+ push {r11, lr}
+ ldm r1, {r3, lr}
+ umull r12, r1, r3, r2
+ mov r3, #0
+ umlal r1, r3, lr, r2
+ str r12, [r0]
+ stmib r0, {r1, r3}
+ pop {r11, lr}
+ mov pc, lr
+.Lfunc_end20:
+ .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre2L
+ .align 2
+ .type mcl_fpDbl_mulPre2L,%function
+mcl_fpDbl_mulPre2L: @ @mcl_fpDbl_mulPre2L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, lr}
+ push {r4, r5, r6, lr}
+ ldr r3, [r2]
+ ldm r1, {r12, lr}
+ ldr r2, [r2, #4]
+ mov r5, #0
+ umull r1, r4, r12, r3
+ umlal r4, r5, lr, r3
+ umull r3, r6, r12, r2
+ str r1, [r0]
+ mov r1, #0
+ adds r3, r3, r4
+ str r3, [r0, #4]
+ umull r3, r4, lr, r2
+ adcs r2, r3, r5
+ adc r1, r1, #0
+ adds r2, r2, r6
+ adc r1, r1, r4
+ str r2, [r0, #8]
+ str r1, [r0, #12]
+ pop {r4, r5, r6, lr}
+ mov pc, lr
+.Lfunc_end21:
+ .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre2L
+ .align 2
+ .type mcl_fpDbl_sqrPre2L,%function
+mcl_fpDbl_sqrPre2L: @ @mcl_fpDbl_sqrPre2L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, lr}
+ push {r4, r5, r6, lr}
+ ldr r2, [r1]
+ ldr r1, [r1, #4]
+ mov r4, #0
+ mov lr, #0
+ umull r12, r3, r2, r2
+ umull r5, r6, r1, r2
+ umlal r3, r4, r1, r2
+ str r12, [r0]
+ adds r2, r3, r5
+ umull r3, r5, r1, r1
+ adcs r1, r4, r3
+ str r2, [r0, #4]
+ adc r3, lr, #0
+ adds r1, r1, r6
+ adc r3, r3, r5
+ str r1, [r0, #8]
+ str r3, [r0, #12]
+ pop {r4, r5, r6, lr}
+ mov pc, lr
+.Lfunc_end22:
+ .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont2L
+ .align 2
+ .type mcl_fp_mont2L,%function
+mcl_fp_mont2L: @ @mcl_fp_mont2L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldm r1, {r12, lr}
+ ldm r2, {r1, r2}
+ mov r7, #0
+ mov r5, #0
+ mov r6, #0
+ umull r8, r9, r2, r12
+ umull r11, r4, r12, r1
+ umlal r9, r7, r2, lr
+ umlal r4, r5, lr, r1
+ ldmda r3, {r12, lr}
+ ldr r10, [r3, #4]
+ mul r1, r11, r12
+ umull r3, r2, r1, lr
+ adds r3, r3, r11
+ mov r3, #0
+ umlal r2, r3, r1, r10
+ adcs r1, r2, r4
+ adcs r2, r3, r5
+ adc r3, r6, #0
+ adds r1, r1, r8
+ adcs r8, r2, r9
+ mul r5, r1, r12
+ adcs r3, r3, r7
+ umull r7, r2, r5, lr
+ adc r4, r6, #0
+ umlal r2, r6, r5, r10
+ adds r1, r7, r1
+ adcs r1, r2, r8
+ adcs r2, r6, r3
+ adc r3, r4, #0
+ subs r7, r1, lr
+ sbcs r6, r2, r10
+ sbc r3, r3, #0
+ ands r3, r3, #1
+ movne r7, r1
+ movne r6, r2
+ str r7, [r0]
+ str r6, [r0, #4]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end23:
+ .size mcl_fp_mont2L, .Lfunc_end23-mcl_fp_mont2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF2L
+ .align 2
+ .type mcl_fp_montNF2L,%function
+mcl_fp_montNF2L: @ @mcl_fp_montNF2L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldm r2, {r12, lr}
+ ldr r11, [r1]
+ ldr r8, [r3, #-4]
+ ldr r7, [r3]
+ ldr r9, [r1, #4]
+ ldr r3, [r3, #4]
+ umull r4, r5, r11, r12
+ mul r6, r4, r8
+ umull r1, r10, r6, r7
+ adds r1, r1, r4
+ mov r4, #0
+ umlal r5, r4, r9, r12
+ umull r2, r12, r6, r3
+ mov r1, #0
+ adcs r2, r2, r5
+ adc r4, r4, #0
+ adds r2, r2, r10
+ adc r6, r4, r12
+ umull r5, r4, lr, r11
+ adds r2, r5, r2
+ umlal r4, r1, lr, r9
+ adcs r9, r4, r6
+ mul r5, r2, r8
+ adc lr, r1, #0
+ umull r1, r6, r5, r7
+ umull r4, r12, r5, r3
+ adds r1, r1, r2
+ adcs r1, r4, r9
+ adc r2, lr, #0
+ adds r1, r1, r6
+ adc r2, r2, r12
+ subs r7, r1, r7
+ sbc r3, r2, r3
+ cmp r3, #0
+ movlt r7, r1
+ movlt r3, r2
+ str r7, [r0]
+ str r3, [r0, #4]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end24:
+ .size mcl_fp_montNF2L, .Lfunc_end24-mcl_fp_montNF2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed2L
+ .align 2
+ .type mcl_fp_montRed2L,%function
+mcl_fp_montRed2L: @ @mcl_fp_montRed2L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, lr}
+ ldr r12, [r2, #-4]
+ ldm r2, {r3, lr}
+ ldm r1, {r2, r9, r10}
+ ldr r8, [r1, #12]
+ mov r5, #0
+ mov r7, #0
+ mul r6, r2, r12
+ umull r1, r4, r6, r3
+ umlal r4, r5, r6, lr
+ adds r1, r2, r1
+ adcs r1, r9, r4
+ adcs r9, r10, r5
+ mul r6, r1, r12
+ adcs r8, r8, #0
+ umull r2, r4, r6, r3
+ adc r5, r7, #0
+ umlal r4, r7, r6, lr
+ adds r1, r2, r1
+ adcs r1, r4, r9
+ adcs r2, r7, r8
+ adc r7, r5, #0
+ subs r3, r1, r3
+ sbcs r6, r2, lr
+ sbc r7, r7, #0
+ ands r7, r7, #1
+ movne r3, r1
+ movne r6, r2
+ stm r0, {r3, r6}
+ pop {r4, r5, r6, r7, r8, r9, r10, lr}
+ mov pc, lr
+.Lfunc_end25:
+ .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre2L
+ .align 2
+ .type mcl_fp_addPre2L,%function
+mcl_fp_addPre2L: @ @mcl_fp_addPre2L
+ .fnstart
+@ BB#0:
+ ldm r1, {r3, r12}
+ ldm r2, {r1, r2}
+ adds r1, r1, r3
+ adcs r2, r2, r12
+ stm r0, {r1, r2}
+ mov r0, #0
+ adc r0, r0, #0
+ mov pc, lr
+.Lfunc_end26:
+ .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subPre2L
+ .align 2
+ .type mcl_fp_subPre2L,%function
+mcl_fp_subPre2L: @ @mcl_fp_subPre2L
+ .fnstart
+@ BB#0:
+ ldm r2, {r3, r12}
+ ldr r2, [r1]
+ ldr r1, [r1, #4]
+ subs r2, r2, r3
+ sbcs r1, r1, r12
+ str r2, [r0]
+ str r1, [r0, #4]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ mov pc, lr
+.Lfunc_end27:
+ .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_2L
+ .align 2
+ .type mcl_fp_shr1_2L,%function
+mcl_fp_shr1_2L: @ @mcl_fp_shr1_2L
+ .fnstart
+@ BB#0:
+ ldr r2, [r1]
+ ldr r1, [r1, #4]
+ lsrs r3, r1, #1
+ lsr r1, r1, #1
+ rrx r2, r2
+ str r2, [r0]
+ str r1, [r0, #4]
+ mov pc, lr
+.Lfunc_end28:
+ .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add2L
+ .align 2
+ .type mcl_fp_add2L,%function
+mcl_fp_add2L: @ @mcl_fp_add2L
+ .fnstart
+@ BB#0:
+ .save {r4, lr}
+ push {r4, lr}
+ ldm r1, {r12, lr}
+ ldm r2, {r1, r2}
+ adds r12, r1, r12
+ mov r1, #0
+ adcs r2, r2, lr
+ str r12, [r0]
+ str r2, [r0, #4]
+ adc lr, r1, #0
+ ldm r3, {r1, r4}
+ subs r3, r12, r1
+ sbcs r2, r2, r4
+ sbc r1, lr, #0
+ tst r1, #1
+ streq r3, [r0]
+ streq r2, [r0, #4]
+ pop {r4, lr}
+ mov pc, lr
+.Lfunc_end29:
+ .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF2L
+ .align 2
+ .type mcl_fp_addNF2L,%function
+mcl_fp_addNF2L: @ @mcl_fp_addNF2L
+ .fnstart
+@ BB#0:
+ .save {r4, lr}
+ push {r4, lr}
+ ldm r1, {r12, lr}
+ ldm r2, {r1, r2}
+ adds r1, r1, r12
+ adc r4, r2, lr
+ ldm r3, {r12, lr}
+ subs r3, r1, r12
+ sbc r2, r4, lr
+ cmp r2, #0
+ movlt r3, r1
+ movlt r2, r4
+ str r3, [r0]
+ str r2, [r0, #4]
+ pop {r4, lr}
+ mov pc, lr
+.Lfunc_end30:
+ .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sub2L
+ .align 2
+ .type mcl_fp_sub2L,%function
+mcl_fp_sub2L: @ @mcl_fp_sub2L
+ .fnstart
+@ BB#0:
+ .save {r4, lr}
+ push {r4, lr}
+ ldm r2, {r12, lr}
+ ldm r1, {r2, r4}
+ subs r1, r2, r12
+ sbcs r2, r4, lr
+ mov r4, #0
+ sbc r4, r4, #0
+ stm r0, {r1, r2}
+ tst r4, #1
+ popeq {r4, lr}
+ moveq pc, lr
+ ldr r4, [r3]
+ ldr r3, [r3, #4]
+ adds r1, r4, r1
+ adc r2, r3, r2
+ stm r0, {r1, r2}
+ pop {r4, lr}
+ mov pc, lr
+.Lfunc_end31:
+ .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF2L
+ .align 2
+ .type mcl_fp_subNF2L,%function
+mcl_fp_subNF2L: @ @mcl_fp_subNF2L
+ .fnstart
+@ BB#0:
+ .save {r4, lr}
+ push {r4, lr}
+ ldm r2, {r12, lr}
+ ldr r2, [r1]
+ ldr r1, [r1, #4]
+ subs r4, r2, r12
+ sbc r1, r1, lr
+ ldm r3, {r12, lr}
+ adds r3, r4, r12
+ adc r2, r1, lr
+ cmp r1, #0
+ movge r3, r4
+ movge r2, r1
+ str r3, [r0]
+ str r2, [r0, #4]
+ pop {r4, lr}
+ mov pc, lr
+.Lfunc_end32:
+ .size mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add2L
+ .align 2
+ .type mcl_fpDbl_add2L,%function
+mcl_fpDbl_add2L: @ @mcl_fpDbl_add2L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r11, lr}
+ push {r4, r5, r6, r7, r11, lr}
+ ldm r1, {r12, lr}
+ ldr r4, [r1, #8]
+ ldr r1, [r1, #12]
+ ldm r2, {r5, r6, r7}
+ ldr r2, [r2, #12]
+ adds r5, r5, r12
+ adcs r6, r6, lr
+ str r5, [r0]
+ adcs r7, r7, r4
+ str r6, [r0, #4]
+ mov r6, #0
+ adcs r1, r2, r1
+ adc r2, r6, #0
+ ldr r6, [r3]
+ ldr r3, [r3, #4]
+ subs r6, r7, r6
+ sbcs r3, r1, r3
+ sbc r2, r2, #0
+ ands r2, r2, #1
+ movne r6, r7
+ movne r3, r1
+ str r6, [r0, #8]
+ str r3, [r0, #12]
+ pop {r4, r5, r6, r7, r11, lr}
+ mov pc, lr
+.Lfunc_end33:
+ .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub2L
+ .align 2
+ .type mcl_fpDbl_sub2L,%function
+mcl_fpDbl_sub2L: @ @mcl_fpDbl_sub2L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r11, lr}
+ push {r4, r5, r6, r7, r11, lr}
+ ldm r2, {r12, lr}
+ ldr r4, [r2, #8]
+ ldr r2, [r2, #12]
+ ldm r1, {r5, r6, r7}
+ ldr r1, [r1, #12]
+ subs r5, r5, r12
+ sbcs r6, r6, lr
+ str r5, [r0]
+ sbcs r7, r7, r4
+ str r6, [r0, #4]
+ mov r6, #0
+ sbcs r1, r1, r2
+ sbc r2, r6, #0
+ ldr r6, [r3]
+ ldr r3, [r3, #4]
+ adds r6, r7, r6
+ adc r3, r1, r3
+ ands r2, r2, #1
+ moveq r6, r7
+ moveq r3, r1
+ str r6, [r0, #8]
+ str r3, [r0, #12]
+ pop {r4, r5, r6, r7, r11, lr}
+ mov pc, lr
+.Lfunc_end34:
+ .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre3L
+ .align 2
+ .type mcl_fp_mulUnitPre3L,%function
+mcl_fp_mulUnitPre3L: @ @mcl_fp_mulUnitPre3L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, r5}
+ umull lr, r4, r12, r2
+ umull r1, r12, r5, r2
+ umull r7, r8, r3, r2
+ mov r5, r1
+ mov r6, r4
+ str lr, [r0]
+ umlal r6, r5, r3, r2
+ adds r2, r4, r7
+ adcs r1, r8, r1
+ str r6, [r0, #4]
+ str r5, [r0, #8]
+ adc r1, r12, #0
+ str r1, [r0, #12]
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end35:
+ .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre3L
+ .align 2
+ .type mcl_fpDbl_mulPre3L,%function
+mcl_fpDbl_mulPre3L: @ @mcl_fpDbl_mulPre3L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldr r3, [r2]
+ ldm r1, {r12, lr}
+ ldr r1, [r1, #8]
+ umull r4, r5, r12, r3
+ str r4, [r0]
+ umull r4, r6, lr, r3
+ adds r4, r5, r4
+ umull r7, r4, r1, r3
+ adcs r6, r6, r7
+ umlal r5, r7, lr, r3
+ ldr r3, [r2, #4]
+ ldr r2, [r2, #8]
+ adc r8, r4, #0
+ umull r6, r10, r12, r3
+ adds r9, r6, r5
+ umull r6, r5, lr, r3
+ adcs r6, r6, r7
+ umull r7, r4, r1, r3
+ str r9, [r0, #4]
+ adcs r3, r7, r8
+ mov r8, #0
+ adc r7, r8, #0
+ adds r6, r6, r10
+ adcs r11, r3, r5
+ umull r5, r9, r1, r2
+ umull r1, r10, lr, r2
+ adc r4, r7, r4
+ umull r7, r3, r12, r2
+ adds r2, r6, r7
+ adcs r1, r11, r1
+ str r2, [r0, #8]
+ adcs r2, r4, r5
+ adc r7, r8, #0
+ adds r1, r1, r3
+ str r1, [r0, #12]
+ adcs r1, r2, r10
+ str r1, [r0, #16]
+ adc r1, r7, r9
+ str r1, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end36:
+ .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre3L
+ .align 2
+ .type mcl_fpDbl_sqrPre3L,%function
+mcl_fpDbl_sqrPre3L: @ @mcl_fpDbl_sqrPre3L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, lr}
+ ldm r1, {r2, r3, r12}
+ mov r10, #0
+ umull r1, lr, r2, r2
+ umull r7, r4, r3, r2
+ str r1, [r0]
+ umull r1, r8, r12, r2
+ mov r5, lr
+ mov r6, r1
+ umlal r5, r6, r3, r2
+ adds r2, lr, r7
+ adcs r2, r4, r1
+ adc r2, r8, #0
+ adds lr, r5, r7
+ umull r5, r9, r3, r3
+ adcs r5, r6, r5
+ umull r6, r7, r12, r3
+ str lr, [r0, #4]
+ adcs r2, r2, r6
+ adc r3, r10, #0
+ adds r4, r5, r4
+ adcs r2, r2, r9
+ adc r3, r3, r7
+ adds r1, r4, r1
+ umull r5, r4, r12, r12
+ str r1, [r0, #8]
+ adcs r1, r2, r6
+ adcs r2, r3, r5
+ adc r3, r10, #0
+ adds r1, r1, r8
+ str r1, [r0, #12]
+ adcs r1, r2, r7
+ str r1, [r0, #16]
+ adc r1, r3, r4
+ str r1, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, lr}
+ mov pc, lr
+.Lfunc_end37:
+ .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont3L
+ .align 2
+ .type mcl_fp_mont3L,%function
+mcl_fp_mont3L: @ @mcl_fp_mont3L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r2, {r8, lr}
+ ldr r0, [r2, #8]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldmib r1, {r4, r9}
+ ldr r2, [r3, #-4]
+ umull r7, r6, r0, r8
+ ldr r0, [r3]
+ ldr r1, [r3, #8]
+ ldr r10, [r3, #4]
+ str r7, [sp, #12] @ 4-byte Spill
+ mul r5, r7, r2
+ str r2, [sp, #16] @ 4-byte Spill
+ str r9, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #40] @ 4-byte Spill
+ str r1, [sp, #28] @ 4-byte Spill
+ umull r12, r2, r5, r1
+ umull r1, r3, r5, r0
+ umull r0, r7, r9, r8
+ umull r11, r9, r4, r8
+ str r7, [sp] @ 4-byte Spill
+ adds r7, r6, r11
+ str r1, [sp, #8] @ 4-byte Spill
+ mov r1, r3
+ str r2, [sp, #4] @ 4-byte Spill
+ mov r2, r12
+ adcs r7, r9, r0
+ umlal r1, r2, r5, r10
+ umlal r6, r0, r4, r8
+ mov r8, #0
+ ldr r7, [sp] @ 4-byte Reload
+ adc r9, r7, #0
+ umull r7, r11, r5, r10
+ ldr r5, [sp, #8] @ 4-byte Reload
+ adds r3, r3, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r3, r11, r12
+ ldr r3, [sp, #4] @ 4-byte Reload
+ adc r3, r3, #0
+ adds r7, r5, r7
+ adcs r11, r1, r6
+ adcs r12, r2, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r9, r3, r9
+ ldr r3, [sp, #36] @ 4-byte Reload
+ adc r8, r8, #0
+ umull r6, r7, lr, r0
+ umull r5, r0, lr, r4
+ umull r1, r2, lr, r3
+ adds r5, r2, r5
+ adcs r0, r0, r6
+ umlal r2, r6, lr, r4
+ adc r0, r7, #0
+ adds r1, r11, r1
+ ldr r11, [sp, #16] @ 4-byte Reload
+ adcs r2, r12, r2
+ ldr r12, [sp, #28] @ 4-byte Reload
+ str r2, [sp, #12] @ 4-byte Spill
+ adcs r2, r9, r6
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #4] @ 4-byte Spill
+ mov r0, #0
+ mul r6, r1, r11
+ adc r0, r0, #0
+ umull r7, r9, r6, r12
+ str r0, [sp] @ 4-byte Spill
+ mov r5, r7
+ umull r8, r0, r6, r2
+ umull lr, r2, r6, r10
+ mov r3, r0
+ adds r0, r0, lr
+ ldr lr, [sp, #36] @ 4-byte Reload
+ adcs r0, r2, r7
+ umlal r3, r5, r6, r10
+ adc r0, r9, #0
+ adds r1, r8, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r1, r3, r1
+ ldr r3, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r8, r5, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r9, r0, r1
+ ldr r0, [sp] @ 4-byte Reload
+ umull r1, r2, r3, lr
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ umull r6, r7, r3, r0
+ umull r5, r0, r3, r4
+ adds r5, r2, r5
+ adcs r0, r0, r6
+ umlal r2, r6, r3, r4
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adc r0, r7, #0
+ adds r1, r3, r1
+ adcs r2, r8, r2
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r9, r9, r6
+ mul r6, r1, r11
+ umull r7, r4, r6, r12
+ ldr r12, [sp, #40] @ 4-byte Reload
+ mov r5, r7
+ adcs r0, r2, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ umull r11, r3, r6, r12
+ adc r8, r0, #0
+ umull r0, lr, r6, r10
+ mov r2, r3
+ adds r0, r3, r0
+ ldr r3, [sp, #32] @ 4-byte Reload
+ umlal r2, r5, r6, r10
+ adcs r0, lr, r7
+ adc r0, r4, #0
+ adds r1, r11, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r2, r1
+ adcs r2, r5, r9
+ ldr r5, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r3
+ adc r3, r8, #0
+ subs r7, r1, r12
+ sbcs r6, r2, r10
+ sbcs r5, r0, r5
+ sbc r3, r3, #0
+ ands r3, r3, #1
+ movne r5, r0
+ ldr r0, [sp, #24] @ 4-byte Reload
+ movne r7, r1
+ movne r6, r2
+ str r7, [r0]
+ str r6, [r0, #4]
+ str r5, [r0, #8]
+ add sp, sp, #44
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end38:
+ .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF3L
+ .align 2
+ .type mcl_fp_montNF3L,%function
+mcl_fp_montNF3L: @ @mcl_fp_montNF3L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #68
+ sub sp, sp, #68
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r8, [r1]
+ ldmib r1, {r6, r9}
+ ldm r2, {r4, r7}
+ ldr r0, [r2, #8]
+ mov r10, r3
+ umull r3, r1, r0, r9
+ str r1, [sp, #52] @ 4-byte Spill
+ umull r1, r2, r0, r8
+ str r3, [sp, #44] @ 4-byte Spill
+ str r1, [sp, #48] @ 4-byte Spill
+ str r2, [sp, #40] @ 4-byte Spill
+ mov r1, r2
+ mov r2, r3
+ umull r3, r5, r0, r6
+ umlal r1, r2, r0, r6
+ str r3, [sp, #32] @ 4-byte Spill
+ umull r3, r0, r7, r6
+ str r5, [sp, #36] @ 4-byte Spill
+ str r1, [sp, #56] @ 4-byte Spill
+ str r2, [sp, #60] @ 4-byte Spill
+ umull r2, r1, r7, r9
+ str r0, [sp, #8] @ 4-byte Spill
+ str r3, [sp, #4] @ 4-byte Spill
+ str r1, [sp, #28] @ 4-byte Spill
+ umull r1, r11, r7, r8
+ str r2, [sp, #16] @ 4-byte Spill
+ str r1, [sp, #24] @ 4-byte Spill
+ mov r1, r2
+ str r11, [sp, #12] @ 4-byte Spill
+ umlal r11, r1, r7, r6
+ umull r0, r7, r6, r4
+ str r1, [sp, #20] @ 4-byte Spill
+ umull lr, r1, r9, r4
+ umull r9, r2, r8, r4
+ ldr r8, [r10, #-4]
+ adds r0, r2, r0
+ str r1, [sp] @ 4-byte Spill
+ mov r1, r2
+ mov r12, lr
+ adcs r0, r7, lr
+ umlal r1, r12, r6, r4
+ ldr r0, [sp] @ 4-byte Reload
+ ldm r10, {r6, r7}
+ mul r2, r9, r8
+ adc r3, r0, #0
+ ldr r0, [r10, #8]
+ umull r4, lr, r2, r6
+ adds r4, r4, r9
+ umull r4, r9, r2, r7
+ adcs r1, r4, r1
+ umull r4, r5, r2, r0
+ adcs r2, r4, r12
+ ldr r4, [sp, #4] @ 4-byte Reload
+ adc r3, r3, #0
+ adds r1, r1, lr
+ adcs r2, r2, r9
+ adc r3, r3, r5
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adds r5, r5, r4
+ ldr r4, [sp, #8] @ 4-byte Reload
+ ldr r5, [sp, #16] @ 4-byte Reload
+ adcs r5, r4, r5
+ ldr r4, [sp, #24] @ 4-byte Reload
+ ldr r5, [sp, #28] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r1, r4, r1
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs r2, r11, r2
+ adcs r12, r4, r3
+ mul r4, r1, r8
+ umull r3, r9, r4, r6
+ adc lr, r5, #0
+ adds r1, r3, r1
+ umull r1, r3, r4, r7
+ adcs r1, r1, r2
+ umull r2, r5, r4, r0
+ adcs r2, r2, r12
+ adc r4, lr, #0
+ adds r1, r1, r9
+ adcs r12, r2, r3
+ ldr r2, [sp, #40] @ 4-byte Reload
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adc r9, r4, r5
+ adds r5, r2, r3
+ ldr r2, [sp, #44] @ 4-byte Reload
+ ldr r3, [sp, #36] @ 4-byte Reload
+ adcs r5, r3, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ ldr r5, [sp, #60] @ 4-byte Reload
+ adc lr, r2, #0
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adds r1, r2, r1
+ mul r4, r1, r8
+ umull r10, r2, r4, r0
+ umull r3, r8, r4, r7
+ str r2, [sp, #52] @ 4-byte Spill
+ umull r2, r11, r4, r6
+ ldr r4, [sp, #56] @ 4-byte Reload
+ adcs r4, r4, r12
+ adcs r12, r5, r9
+ adc r5, lr, #0
+ adds r1, r2, r1
+ adcs r1, r3, r4
+ adcs r2, r10, r12
+ adc r3, r5, #0
+ ldr r5, [sp, #52] @ 4-byte Reload
+ adds r1, r1, r11
+ adcs r2, r2, r8
+ adc r3, r3, r5
+ subs r6, r1, r6
+ sbcs r7, r2, r7
+ sbc r0, r3, r0
+ asr r5, r0, #31
+ cmp r5, #0
+ movlt r6, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ movlt r7, r2
+ movlt r0, r3
+ stm r1, {r6, r7}
+ str r0, [r1, #8]
+ add sp, sp, #68
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end39:
+ .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed3L
+ .align 2
+ .type mcl_fp_montRed3L,%function
+mcl_fp_montRed3L: @ @mcl_fp_montRed3L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #28
+ sub sp, sp, #28
+ ldr r5, [r2]
+ ldr lr, [r2, #-4]
+ ldr r3, [r2, #4]
+ ldr r2, [r2, #8]
+ str r0, [sp, #24] @ 4-byte Spill
+ str r5, [sp, #20] @ 4-byte Spill
+ str r2, [sp] @ 4-byte Spill
+ ldm r1, {r4, r7}
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r1, #8]
+ mul r6, r4, lr
+ umull r10, r8, r6, r3
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r1, #12]
+ str r7, [sp, #12] @ 4-byte Spill
+ umull r7, r9, r6, r2
+ umull r11, r2, r6, r5
+ mov r0, r2
+ adds r2, r2, r10
+ mov r12, r7
+ adcs r2, r8, r7
+ umlal r0, r12, r6, r3
+ ldr r8, [r1, #20]
+ ldr r1, [r1, #16]
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adc r10, r9, #0
+ adds r7, r4, r11
+ mov r11, lr
+ adcs r9, r2, r0
+ ldr r2, [sp] @ 4-byte Reload
+ mul r7, r9, lr
+ umull lr, r0, r7, r2
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r4, r0, r7, r5
+ ldr r5, [sp, #16] @ 4-byte Reload
+ mov r6, lr
+ str r4, [sp, #4] @ 4-byte Spill
+ mov r4, r0
+ umlal r4, r6, r7, r3
+ adcs r12, r5, r12
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adcs r10, r5, r10
+ adcs r1, r1, #0
+ str r1, [sp, #16] @ 4-byte Spill
+ adcs r1, r8, #0
+ str r1, [sp, #12] @ 4-byte Spill
+ mov r1, #0
+ adc r8, r1, #0
+ umull r1, r5, r7, r3
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adds r1, r0, r1
+ adcs r0, r5, lr
+ ldr r1, [sp, #4] @ 4-byte Reload
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r1, r1, r9
+ adcs r1, r4, r12
+ adcs lr, r6, r10
+ ldr r6, [sp, #20] @ 4-byte Reload
+ mul r5, r1, r11
+ mov r11, r2
+ adcs r0, r0, r7
+ umull r4, r12, r5, r2
+ umull r2, r7, r5, r3
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r10, r0, #0
+ umull r9, r0, r5, r6
+ adc r8, r8, #0
+ adds r2, r0, r2
+ mov r2, r4
+ adcs r4, r7, r4
+ adc r7, r12, #0
+ adds r1, r9, r1
+ umlal r0, r2, r5, r3
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, lr
+ adcs r1, r2, r1
+ adcs r2, r7, r10
+ adc r7, r8, #0
+ subs r6, r0, r6
+ sbcs r3, r1, r3
+ sbcs r5, r2, r11
+ sbc r7, r7, #0
+ ands r7, r7, #1
+ movne r6, r0
+ ldr r0, [sp, #24] @ 4-byte Reload
+ movne r3, r1
+ movne r5, r2
+ str r6, [r0]
+ stmib r0, {r3, r5}
+ add sp, sp, #28
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end40:
+ .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre3L
+ .align 2
+ .type mcl_fp_addPre3L,%function
+mcl_fp_addPre3L: @ @mcl_fp_addPre3L
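+@ 3-limb addition without reduction: stores r1[0..2] + r2[0..2] at r0 and
+@ returns the final carry (0 or 1) in r0.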
+ .fnstart
+@ BB#0:
+ .save {r4, lr}
+ push {r4, lr}
+ ldm r1, {r3, r12, lr}
+ ldm r2, {r1, r4}
+ ldr r2, [r2, #8]
+ adds r1, r1, r3
+ adcs r3, r4, r12
+ adcs r2, r2, lr
+ stm r0, {r1, r3}
+ str r2, [r0, #8]
+ mov r0, #0
+ adc r0, r0, #0
+ pop {r4, lr}
+ mov pc, lr
+.Lfunc_end41:
+ .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subPre3L
+ .align 2
+ .type mcl_fp_subPre3L,%function
+mcl_fp_subPre3L: @ @mcl_fp_subPre3L
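+@ 3-limb subtraction without reduction: stores r1[0..2] - r2[0..2] at r0 and
+@ returns the final borrow (0 or 1) in r0.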
+ .fnstart
+@ BB#0:
+ .save {r4, lr}
+ push {r4, lr}
+ ldm r2, {r3, r12, lr}
+ ldm r1, {r2, r4}
+ ldr r1, [r1, #8]
+ subs r2, r2, r3
+ sbcs r3, r4, r12
+ sbcs r1, r1, lr
+ stm r0, {r2, r3}
+ str r1, [r0, #8]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ pop {r4, lr}
+ mov pc, lr
+.Lfunc_end42:
+ .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_3L
+ .align 2
+ .type mcl_fp_shr1_3L,%function
+mcl_fp_shr1_3L: @ @mcl_fp_shr1_3L
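+@ Logical right shift by one bit of the 3-limb value at r1, stored at r0.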
+ .fnstart
+@ BB#0:
+ ldr r3, [r1, #4]
+ ldr r12, [r1]
+ ldr r1, [r1, #8]
+ lsrs r2, r3, #1
+ lsr r3, r3, #1
+ orr r3, r3, r1, lsl #31
+ rrx r2, r12
+ lsr r1, r1, #1
+ stm r0, {r2, r3}
+ str r1, [r0, #8]
+ mov pc, lr
+.Lfunc_end43:
+ .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add3L
+ .align 2
+ .type mcl_fp_add3L,%function
+mcl_fp_add3L: @ @mcl_fp_add3L
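+@ Modular addition, 3 limbs: computes r1 + r2 into r0, then conditionally
+@ subtracts the modulus at r3 so the stored value stays below p.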
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r11, lr}
+ push {r4, r5, r11, lr}
+ ldm r1, {r12, lr}
+ ldr r1, [r1, #8]
+ ldm r2, {r4, r5}
+ ldr r2, [r2, #8]
+ adds r4, r4, r12
+ adcs r5, r5, lr
+ adcs r1, r2, r1
+ stm r0, {r4, r5}
+ mov r2, #0
+ str r1, [r0, #8]
+ adc r12, r2, #0
+ ldm r3, {r2, lr}
+ ldr r3, [r3, #8]
+ subs r4, r4, r2
+ sbcs r5, r5, lr
+ sbcs r3, r1, r3
+ sbc r1, r12, #0
+ tst r1, #1
+ stmeq r0, {r4, r5}
+ streq r3, [r0, #8]
+ pop {r4, r5, r11, lr}
+ mov pc, lr
+.Lfunc_end44:
+ .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF3L
+ .align 2
+ .type mcl_fp_addNF3L,%function
+mcl_fp_addNF3L: @ @mcl_fp_addNF3L
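+@ Modular addition, 3 limbs (NF variant): selects between x+y and x+y-p by the
+@ sign of the subtracted value rather than the carry flag.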
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r11, lr}
+ push {r4, r5, r6, r7, r11, lr}
+ ldm r1, {r12, lr}
+ ldr r1, [r1, #8]
+ ldm r2, {r4, r5}
+ ldr r2, [r2, #8]
+ adds r4, r4, r12
+ adcs r5, r5, lr
+ adc r7, r2, r1
+ ldm r3, {r2, r12, lr}
+ subs r2, r4, r2
+ sbcs r3, r5, r12
+ sbc r1, r7, lr
+ asr r6, r1, #31
+ cmp r6, #0
+ movlt r2, r4
+ movlt r3, r5
+ movlt r1, r7
+ stm r0, {r2, r3}
+ str r1, [r0, #8]
+ pop {r4, r5, r6, r7, r11, lr}
+ mov pc, lr
+.Lfunc_end45:
+ .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sub3L
+ .align 2
+ .type mcl_fp_sub3L,%function
+mcl_fp_sub3L: @ @mcl_fp_sub3L
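+@ Modular subtraction, 3 limbs: computes r1 - r2 into r0; if the subtraction
+@ borrowed, the modulus at r3 is added back.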
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, lr}
+ push {r4, r5, r6, lr}
+ ldm r2, {r12, lr}
+ ldr r4, [r2, #8]
+ ldm r1, {r2, r5, r6}
+ subs r1, r2, r12
+ sbcs r2, r5, lr
+ sbcs r12, r6, r4
+ mov r6, #0
+ sbc r6, r6, #0
+ stm r0, {r1, r2, r12}
+ tst r6, #1
+ popeq {r4, r5, r6, lr}
+ moveq pc, lr
+ ldr r6, [r3]
+ ldr r5, [r3, #4]
+ ldr r3, [r3, #8]
+ adds r1, r6, r1
+ adcs r2, r5, r2
+ adc r3, r3, r12
+ stm r0, {r1, r2, r3}
+ pop {r4, r5, r6, lr}
+ mov pc, lr
+.Lfunc_end46:
+ .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF3L
+ .align 2
+ .type mcl_fp_subNF3L,%function
+mcl_fp_subNF3L: @ @mcl_fp_subNF3L
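+@ Modular subtraction, 3 limbs (NF variant): computes x - y and x - y + p and
+@ selects the result by the sign of the difference.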
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r11, lr}
+ push {r4, r5, r6, r7, r11, lr}
+ ldm r2, {r12, lr}
+ ldr r2, [r2, #8]
+ ldm r1, {r4, r5}
+ ldr r1, [r1, #8]
+ subs r4, r4, r12
+ sbcs r7, r5, lr
+ sbc r1, r1, r2
+ ldm r3, {r2, r12, lr}
+ asr r6, r1, #31
+ adds r2, r4, r2
+ adcs r3, r7, r12
+ adc r5, r1, lr
+ cmp r6, #0
+ movge r2, r4
+ movge r3, r7
+ movge r5, r1
+ stm r0, {r2, r3, r5}
+ pop {r4, r5, r6, r7, r11, lr}
+ mov pc, lr
+.Lfunc_end47:
+ .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add3L
+ .align 2
+ .type mcl_fpDbl_add3L,%function
+mcl_fpDbl_add3L: @ @mcl_fpDbl_add3L
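+@ Double-width (6-limb) addition: the low 3 limbs of r1 + r2 are stored as is;
+@ the high 3 limbs are conditionally reduced by the modulus at r3.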
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldm r1, {r12, lr}
+ ldr r7, [r2]
+ ldr r11, [r1, #8]
+ ldr r9, [r1, #12]
+ ldr r10, [r1, #16]
+ ldr r8, [r1, #20]
+ ldmib r2, {r1, r5, r6}
+ ldr r4, [r2, #16]
+ ldr r2, [r2, #20]
+ adds r7, r7, r12
+ adcs r1, r1, lr
+ str r7, [r0]
+ str r1, [r0, #4]
+ adcs r1, r5, r11
+ ldr r5, [r3]
+ adcs r7, r6, r9
+ str r1, [r0, #8]
+ mov r1, #0
+ adcs r6, r4, r10
+ ldr r4, [r3, #4]
+ ldr r3, [r3, #8]
+ adcs r2, r2, r8
+ adc r1, r1, #0
+ subs r5, r7, r5
+ sbcs r4, r6, r4
+ sbcs r3, r2, r3
+ sbc r1, r1, #0
+ ands r1, r1, #1
+ movne r5, r7
+ movne r4, r6
+ movne r3, r2
+ str r5, [r0, #12]
+ str r4, [r0, #16]
+ str r3, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end48:
+ .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub3L
+ .align 2
+ .type mcl_fpDbl_sub3L,%function
+mcl_fpDbl_sub3L: @ @mcl_fpDbl_sub3L
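+@ Double-width (6-limb) subtraction r1 - r2: the low 3 limbs are stored as is;
+@ the modulus at r3 is added back into the high 3 limbs on borrow.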
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldm r2, {r12, lr}
+ ldr r7, [r1]
+ ldr r11, [r2, #8]
+ ldr r9, [r2, #12]
+ ldr r10, [r2, #16]
+ ldr r8, [r2, #20]
+ ldmib r1, {r2, r5, r6}
+ ldr r4, [r1, #16]
+ ldr r1, [r1, #20]
+ subs r7, r7, r12
+ sbcs r2, r2, lr
+ str r7, [r0]
+ str r2, [r0, #4]
+ sbcs r2, r5, r11
+ ldr r5, [r3]
+ sbcs r7, r6, r9
+ str r2, [r0, #8]
+ mov r2, #0
+ sbcs r6, r4, r10
+ ldr r4, [r3, #4]
+ ldr r3, [r3, #8]
+ sbcs r1, r1, r8
+ sbc r2, r2, #0
+ adds r5, r7, r5
+ adcs r4, r6, r4
+ adc r3, r1, r3
+ ands r2, r2, #1
+ moveq r5, r7
+ moveq r4, r6
+ moveq r3, r1
+ str r5, [r0, #12]
+ str r4, [r0, #16]
+ str r3, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end49:
+ .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre4L
+ .align 2
+ .type mcl_fp_mulUnitPre4L,%function
+mcl_fp_mulUnitPre4L: @ @mcl_fp_mulUnitPre4L
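+@ Multiplies the 4-limb value at r1 by the single 32-bit word r2 and stores the
+@ 5-limb product at r0.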
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r11, lr}
+ push {r4, r5, r6, r7, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r1, [r1, #12]
+ umull r4, r6, r12, r2
+ umull r7, r12, lr, r2
+ str r4, [r0]
+ mov r5, r6
+ mov r4, r7
+ umlal r5, r4, r3, r2
+ str r5, [r0, #4]
+ str r4, [r0, #8]
+ umull r5, lr, r1, r2
+ umull r1, r4, r3, r2
+ adds r1, r6, r1
+ adcs r1, r4, r7
+ adcs r1, r12, r5
+ str r1, [r0, #12]
+ adc r1, lr, #0
+ str r1, [r0, #16]
+ pop {r4, r5, r6, r7, r11, lr}
+ mov pc, lr
+.Lfunc_end50:
+ .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre4L
+ .align 2
+ .type mcl_fpDbl_mulPre4L,%function
+mcl_fpDbl_mulPre4L: @ @mcl_fpDbl_mulPre4L
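+@ Full 4 x 4-limb multiplication: stores the 8-limb product of the values at r1
+@ and r2 into r0, with no modular reduction.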
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #40
+ sub sp, sp, #40
+ mov lr, r2
+ ldr r11, [r1]
+ ldr r4, [lr]
+ ldmib r1, {r8, r12}
+ ldr r3, [r1, #12]
+ umull r2, r7, r11, r4
+ umull r6, r9, r8, r4
+ str r12, [sp] @ 4-byte Spill
+ adds r6, r7, r6
+ str r2, [sp, #36] @ 4-byte Spill
+ mov r2, r3
+ umull r6, r10, r12, r4
+ adcs r5, r9, r6
+ umlal r7, r6, r8, r4
+ umull r5, r9, r3, r4
+ ldr r3, [sp, #36] @ 4-byte Reload
+ ldr r4, [lr, #4]
+ adcs r10, r10, r5
+ str r3, [r0]
+ adc r3, r9, #0
+ str r3, [sp, #24] @ 4-byte Spill
+ umull r5, r3, r11, r4
+ adds r7, r5, r7
+ str r3, [sp, #32] @ 4-byte Spill
+ str r7, [sp, #36] @ 4-byte Spill
+ umull r7, r3, r8, r4
+ str r3, [sp, #28] @ 4-byte Spill
+ adcs r3, r7, r6
+ umull r7, r9, r12, r4
+ mov r12, r2
+ ldr r6, [sp, #32] @ 4-byte Reload
+ adcs r7, r7, r10
+ umull r5, r10, r2, r4
+ ldr r2, [sp, #24] @ 4-byte Reload
+ mov r4, #0
+ adcs r5, r5, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r6, r3, r6
+ adcs r7, r7, r2
+ ldr r2, [lr, #12]
+ str r7, [sp, #24] @ 4-byte Spill
+ adcs r7, r5, r9
+ str r7, [sp, #20] @ 4-byte Spill
+ adc r7, r4, r10
+ ldr r4, [lr, #8]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [sp, #36] @ 4-byte Reload
+ str r7, [r0, #4]
+ umull r5, r7, r11, r4
+ adds r5, r5, r6
+ str r7, [sp, #12] @ 4-byte Spill
+ str r5, [r0, #8]
+ ldm r1, {r11, lr}
+ ldr r5, [r1, #8]
+ ldr r1, [r1, #12]
+ ldr r3, [sp, #24] @ 4-byte Reload
+ umull r6, r7, r1, r2
+ umull r10, r1, r5, r2
+ str r1, [sp, #32] @ 4-byte Spill
+ umull r5, r1, lr, r2
+ str r6, [sp, #8] @ 4-byte Spill
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ umull r6, r1, r11, r2
+ umull r2, r11, r12, r4
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [sp] @ 4-byte Reload
+ umull lr, r12, r1, r4
+ umull r9, r1, r8, r4
+ ldr r4, [sp, #20] @ 4-byte Reload
+ mov r8, #0
+ adcs r3, r9, r3
+ adcs r4, lr, r4
+ adcs r2, r2, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adc lr, r8, #0
+ adds r3, r3, r7
+ adcs r1, r4, r1
+ adcs r2, r2, r12
+ adc r4, lr, r11
+ adds r3, r6, r3
+ ldr r6, [sp, #4] @ 4-byte Reload
+ str r3, [r0, #12]
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adcs r1, r5, r1
+ adcs r2, r10, r2
+ adcs r3, r3, r4
+ adc r7, r8, #0
+ adds r1, r1, r6
+ str r1, [r0, #16]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r1, [r0, #20]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r3, r1
+ str r1, [r0, #24]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adc r1, r7, r1
+ str r1, [r0, #28]
+ add sp, sp, #40
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end51:
+ .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre4L
+ .align 2
+ .type mcl_fpDbl_sqrPre4L,%function
+mcl_fpDbl_sqrPre4L: @ @mcl_fpDbl_sqrPre4L
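+@ Full squaring: stores the 8-limb square of the 4-limb value at r1 into r0.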
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldm r1, {r2, r3, r12}
+ ldr r8, [r1, #12]
+ umull r4, r6, r2, r2
+ umull r11, lr, r12, r2
+ str r4, [r0]
+ umull r10, r4, r8, r2
+ mov r7, r11
+ mov r5, r6
+ str lr, [sp, #12] @ 4-byte Spill
+ str r4, [sp, #8] @ 4-byte Spill
+ umull r4, r9, r3, r2
+ umlal r5, r7, r3, r2
+ adds r2, r6, r4
+ adcs r2, r9, r11
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r10, lr, r10
+ adc r2, r2, #0
+ adds r4, r4, r5
+ str r2, [sp] @ 4-byte Spill
+ umull r6, r2, r3, r3
+ str r4, [sp, #8] @ 4-byte Spill
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [sp] @ 4-byte Reload
+ adcs r5, r6, r7
+ umull r6, r7, r12, r3
+ adcs lr, r6, r10
+ umull r4, r10, r8, r3
+ adcs r3, r4, r2
+ ldr r2, [sp, #4] @ 4-byte Reload
+ mov r4, #0
+ adc r4, r4, #0
+ adds r5, r5, r9
+ adcs r9, lr, r2
+ adcs r2, r3, r7
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adc r4, r4, r10
+ adds r5, r11, r5
+ str r2, [sp, #4] @ 4-byte Spill
+ umull r2, r10, r8, r12
+ umull lr, r8, r12, r12
+ adcs r6, r6, r9
+ stmib r0, {r3, r5}
+ mov r5, #0
+ ldr r3, [sp, #4] @ 4-byte Reload
+ adcs r3, lr, r3
+ adcs r2, r2, r4
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r6, r6, r4
+ adcs r11, r3, r7
+ adcs lr, r2, r8
+ adc r8, r5, r10
+ ldr r5, [r1]
+ ldmib r1, {r4, r7}
+ ldr r1, [r1, #12]
+ umull r12, r2, r1, r1
+ umull r3, r9, r7, r1
+ umull r7, r10, r4, r1
+ str r2, [sp, #12] @ 4-byte Spill
+ umull r4, r2, r5, r1
+ adds r1, r4, r6
+ adcs r4, r7, r11
+ str r1, [r0, #12]
+ mov r7, #0
+ adcs r3, r3, lr
+ adcs r1, r12, r8
+ adc r7, r7, #0
+ adds r2, r4, r2
+ str r2, [r0, #16]
+ adcs r2, r3, r10
+ adcs r1, r1, r9
+ str r2, [r0, #20]
+ str r1, [r0, #24]
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adc r1, r7, r1
+ str r1, [r0, #28]
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end52:
+ .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont4L
+ .align 2
+ .type mcl_fp_mont4L,%function
+mcl_fp_mont4L: @ @mcl_fp_mont4L
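+@ Montgomery multiplication, 4 limbs: r0 = result, r1 = x, r2 = y, r3 = modulus;
+@ the reduction constant is read from r3[-4].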
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #76
+ sub sp, sp, #76
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r2, #8]
+ ldr r9, [r2]
+ ldr r8, [r2, #4]
+ ldr r6, [r3, #-4]
+ ldr r11, [r1, #8]
+ ldr r10, [r1, #12]
+ ldr r7, [r3, #8]
+ ldr r5, [r3, #4]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r2, #12]
+ ldr r2, [r1, #4]
+ str r6, [sp, #44] @ 4-byte Spill
+ str r7, [sp, #40] @ 4-byte Spill
+ str r5, [sp, #52] @ 4-byte Spill
+ str r11, [sp, #60] @ 4-byte Spill
+ str r10, [sp, #56] @ 4-byte Spill
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1]
+ ldr r1, [r3]
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r3, [r3, #12]
+ umull r4, r2, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ str r1, [sp, #48] @ 4-byte Spill
+ mul r0, r4, r6
+ str r4, [sp, #24] @ 4-byte Spill
+ mov r4, r5
+ umull lr, r6, r0, r7
+ umull r7, r12, r0, r1
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [sp, #72] @ 4-byte Reload
+ str r6, [sp, #16] @ 4-byte Spill
+ mov r6, r12
+ str lr, [sp, #8] @ 4-byte Spill
+ umlal r6, lr, r0, r5
+ umull r5, r1, r10, r9
+ str r1, [sp, #68] @ 4-byte Spill
+ str r5, [sp, #12] @ 4-byte Spill
+ umull r1, r10, r11, r9
+ umull r11, r5, r7, r9
+ adds r7, r2, r11
+ adcs r5, r5, r1
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adcs r11, r10, r5
+ ldr r5, [sp, #68] @ 4-byte Reload
+ str r3, [sp, #68] @ 4-byte Spill
+ adc r5, r5, #0
+ str r5, [sp, #12] @ 4-byte Spill
+ umull r5, r7, r0, r3
+ umull r10, r3, r0, r4
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adds r0, r12, r10
+ mov r12, #0
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r3, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #20] @ 4-byte Reload
+ adc r3, r7, #0
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adds r4, r5, r4
+ umlal r2, r1, r7, r9
+ adcs r2, r6, r2
+ adcs r1, lr, r1
+ str r2, [sp, #24] @ 4-byte Spill
+ adcs r9, r0, r11
+ ldr r0, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ adcs r6, r3, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mov r3, r7
+ adc r10, r12, #0
+ umull r2, r12, r8, r7
+ ldr r7, [sp, #64] @ 4-byte Reload
+ umull r5, r4, r8, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ umull r1, lr, r8, r0
+ umull r11, r0, r8, r7
+ adds r2, r0, r2
+ adcs r2, r12, r1
+ umlal r0, r1, r8, r3
+ ldr r3, [sp, #24] @ 4-byte Reload
+ ldr r8, [sp, #48] @ 4-byte Reload
+ adcs r2, lr, r5
+ adc r5, r4, #0
+ adds r7, r3, r11
+ ldr r3, [sp, #20] @ 4-byte Reload
+ ldr r11, [sp, #40] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r9, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ adcs r0, r6, r2
+ str r0, [sp, #16] @ 4-byte Spill
+ adcs r0, r10, r5
+ ldr r10, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ mul r5, r7, r10
+ umull r6, r0, r5, r11
+ str r0, [sp] @ 4-byte Spill
+ umull r0, r3, r5, r8
+ mov r4, r6
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ mov r2, r3
+ umlal r2, r4, r5, r1
+ umull r9, r12, r5, r0
+ umull lr, r0, r5, r1
+ adds r3, r3, lr
+ adcs r0, r0, r6
+ ldr r3, [sp, #4] @ 4-byte Reload
+ ldr r0, [sp] @ 4-byte Reload
+ adcs r0, r0, r9
+ adc r1, r12, #0
+ adds r3, r3, r7
+ ldr r12, [sp, #64] @ 4-byte Reload
+ ldr r3, [sp, #24] @ 4-byte Reload
+ adcs r2, r2, r3
+ ldr r3, [sp, #28] @ 4-byte Reload
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [sp, #20] @ 4-byte Reload
+ umull r9, r7, r3, r12
+ adcs r2, r4, r2
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ umull r6, r5, r3, r0
+ umull r0, r4, r3, r1
+ umull r1, lr, r3, r2
+ adds r1, r7, r1
+ adcs r1, lr, r0
+ umlal r7, r0, r3, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r1, r4, r6
+ adc r6, r5, #0
+ adds r3, r2, r9
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r2, r2, r7
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r6
+ mul r6, r3, r10
+ str r0, [sp, #16] @ 4-byte Spill
+ mov r0, #0
+ umull r7, r9, r6, r11
+ umull r10, r4, r6, r8
+ adc r0, r0, #0
+ mov r2, r4
+ mov r5, r7
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ umlal r2, r5, r6, r1
+ umull r8, r12, r6, r0
+ umull lr, r0, r6, r1
+ adds r6, r4, lr
+ adcs r0, r0, r7
+ adcs r0, r9, r8
+ adc r1, r12, #0
+ adds r3, r10, r3
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r2, r2, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r8, r5, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs r9, r0, r2
+ ldr r0, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #72] @ 4-byte Reload
+ umull lr, r7, r3, r5
+ ldr r5, [sp, #52] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ umull r6, r10, r3, r0
+ umull r0, r4, r3, r1
+ umull r1, r12, r3, r2
+ adds r1, r7, r1
+ adcs r1, r12, r0
+ umlal r7, r0, r3, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ ldr r12, [sp, #68] @ 4-byte Reload
+ adcs r1, r4, r6
+ ldr r4, [sp, #40] @ 4-byte Reload
+ adc r6, r10, #0
+ adds lr, r2, lr
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r10, r8, r7
+ adcs r0, r9, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ adcs r0, r11, r1
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r8, r0, r6
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ mul r6, lr, r0
+ umull r1, r3, r6, r5
+ umull r11, r7, r6, r2
+ umull r0, r9, r6, r4
+ adds r1, r7, r1
+ adcs r1, r3, r0
+ umlal r7, r0, r6, r5
+ umull r1, r3, r6, r12
+ adcs r1, r9, r1
+ mov r9, r5
+ adc r5, r3, #0
+ adds r3, r11, lr
+ adcs r3, r7, r10
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r7
+ adcs lr, r5, r8
+ ldr r5, [sp, #60] @ 4-byte Reload
+ adc r8, r5, #0
+ subs r6, r3, r2
+ sbcs r5, r0, r9
+ sbcs r4, r1, r4
+ sbcs r7, lr, r12
+ sbc r2, r8, #0
+ ands r2, r2, #1
+ movne r5, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ movne r6, r3
+ movne r4, r1
+ cmp r2, #0
+ movne r7, lr
+ str r6, [r0]
+ str r5, [r0, #4]
+ str r4, [r0, #8]
+ str r7, [r0, #12]
+ add sp, sp, #76
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end53:
+ .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF4L
+ .align 2
+ .type mcl_fp_montNF4L,%function
+mcl_fp_montNF4L: @ @mcl_fp_montNF4L
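+@ Montgomery multiplication, 4 limbs (NF variant): same arguments as
+@ mcl_fp_mont4L; the final value is chosen by a signed comparison rather than a
+@ borrow test.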
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #140
+ sub sp, sp, #140
+ mov r10, r3
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr lr, [r1]
+ ldmib r1, {r4, r8, r12}
+ ldr r3, [r2]
+ ldr r1, [r2, #4]
+ ldr r0, [r2, #8]
+ ldr r2, [r2, #12]
+ umull r6, r5, r2, r8
+ str r5, [sp, #124] @ 4-byte Spill
+ umull r5, r7, r2, lr
+ str r6, [sp, #112] @ 4-byte Spill
+ str r5, [sp, #128] @ 4-byte Spill
+ mov r5, r6
+ mov r6, r7
+ str r7, [sp, #108] @ 4-byte Spill
+ umlal r6, r5, r2, r4
+ str r5, [sp, #120] @ 4-byte Spill
+ umull r7, r5, r0, r8
+ str r6, [sp, #116] @ 4-byte Spill
+ str r5, [sp, #84] @ 4-byte Spill
+ umull r5, r6, r0, lr
+ str r7, [sp, #72] @ 4-byte Spill
+ str r5, [sp, #88] @ 4-byte Spill
+ str r6, [sp, #68] @ 4-byte Spill
+ mov r5, r6
+ mov r6, r7
+ umlal r5, r6, r0, r4
+ str r5, [sp, #76] @ 4-byte Spill
+ str r6, [sp, #80] @ 4-byte Spill
+ umull r6, r5, r1, r8
+ str r5, [sp, #44] @ 4-byte Spill
+ umull r5, r7, r1, lr
+ str r6, [sp, #32] @ 4-byte Spill
+ str r5, [sp, #48] @ 4-byte Spill
+ mov r5, r6
+ mov r6, r7
+ str r7, [sp, #28] @ 4-byte Spill
+ umlal r6, r5, r1, r4
+ str r5, [sp, #40] @ 4-byte Spill
+ umull r9, r5, r8, r3
+ str r6, [sp, #36] @ 4-byte Spill
+ str r5, [sp, #136] @ 4-byte Spill
+ umull r6, r5, lr, r3
+ mov r8, r9
+ str r6, [sp, #4] @ 4-byte Spill
+ umull r11, r6, r2, r12
+ mov lr, r5
+ str r6, [sp, #104] @ 4-byte Spill
+ umull r7, r6, r2, r4
+ umlal lr, r8, r4, r3
+ str r11, [sp, #100] @ 4-byte Spill
+ str r6, [sp, #96] @ 4-byte Spill
+ umull r6, r2, r0, r12
+ str r7, [sp, #92] @ 4-byte Spill
+ str r6, [sp, #60] @ 4-byte Spill
+ str r2, [sp, #64] @ 4-byte Spill
+ umull r6, r2, r0, r4
+ str r2, [sp, #56] @ 4-byte Spill
+ umull r2, r0, r1, r12
+ str r6, [sp, #52] @ 4-byte Spill
+ str r2, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r2, r0, r1, r4
+ str r2, [sp, #12] @ 4-byte Spill
+ umull r2, r6, r4, r3
+ str r0, [sp, #16] @ 4-byte Spill
+ umull r0, r1, r12, r3
+ ldr r4, [r10, #4]
+ adds r2, r5, r2
+ ldr r5, [sp, #4] @ 4-byte Reload
+ adcs r2, r6, r9
+ ldr r9, [r10, #8]
+ ldr r2, [sp, #136] @ 4-byte Reload
+ str r4, [sp, #136] @ 4-byte Spill
+ adcs r12, r2, r0
+ ldr r2, [r10, #-4]
+ adc r0, r1, #0
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [r10]
+ mul r1, r5, r2
+ mov r7, r2
+ umull r3, r11, r1, r0
+ str r0, [sp, #8] @ 4-byte Spill
+ mov r6, r0
+ umull r2, r0, r1, r9
+ adds r3, r3, r5
+ umull r3, r5, r1, r4
+ adcs r3, r3, lr
+ ldr lr, [r10, #12]
+ adcs r2, r2, r8
+ umull r4, r8, r1, lr
+ adcs r1, r4, r12
+ ldr r4, [sp] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r3, r3, r11
+ adcs r2, r2, r5
+ adcs r12, r1, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r1, r4, r8
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adds r4, r0, r4
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r4, [sp, #16] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs r4, r0, r4
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r5, r0, #0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adds r3, r0, r3
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r2, r0, r2
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r12
+ mov r12, r7
+ adcs r8, r4, r1
+ ldr r1, [sp, #136] @ 4-byte Reload
+ adc r10, r5, #0
+ mul r5, r3, r7
+ umull r7, r11, r5, r6
+ adds r3, r7, r3
+ umull r3, r7, r5, r1
+ adcs r2, r3, r2
+ umull r3, r4, r5, r9
+ adcs r0, r3, r0
+ umull r3, r6, r5, lr
+ adcs r3, r3, r8
+ ldr r8, [sp, #8] @ 4-byte Reload
+ adc r5, r10, #0
+ adds r2, r2, r11
+ adcs r0, r0, r7
+ adcs r3, r3, r4
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adc r7, r5, r6
+ ldr r5, [sp, #52] @ 4-byte Reload
+ ldr r6, [sp, #88] @ 4-byte Reload
+ adds r4, r4, r5
+ ldr r5, [sp, #56] @ 4-byte Reload
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adcs r4, r5, r4
+ ldr r5, [sp, #60] @ 4-byte Reload
+ ldr r4, [sp, #84] @ 4-byte Reload
+ adcs r4, r4, r5
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r2, r6, r2
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adcs r3, r6, r3
+ adcs r6, r4, r7
+ adc r10, r5, #0
+ mul r5, r2, r12
+ umull r7, r11, r5, r8
+ adds r2, r7, r2
+ umull r2, r7, r5, r1
+ adcs r0, r2, r0
+ umull r2, r4, r5, r9
+ adcs r2, r2, r3
+ umull r3, r1, r5, lr
+ adcs r3, r3, r6
+ ldr r6, [sp, #128] @ 4-byte Reload
+ adc r5, r10, #0
+ adds r0, r0, r11
+ adcs r2, r2, r7
+ adcs r3, r3, r4
+ ldr r4, [sp, #108] @ 4-byte Reload
+ adc r1, r5, r1
+ ldr r5, [sp, #92] @ 4-byte Reload
+ adds r4, r4, r5
+ ldr r5, [sp, #96] @ 4-byte Reload
+ ldr r4, [sp, #112] @ 4-byte Reload
+ adcs r4, r5, r4
+ ldr r5, [sp, #100] @ 4-byte Reload
+ ldr r4, [sp, #124] @ 4-byte Reload
+ adcs r4, r4, r5
+ ldr r5, [sp, #104] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r0, r6, r0
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r2, r6, r2
+ ldr r6, [sp, #120] @ 4-byte Reload
+ adcs r3, r6, r3
+ adcs r11, r4, r1
+ adc r10, r5, #0
+ mul r5, r0, r12
+ umull r7, r1, r5, r8
+ adds r0, r7, r0
+ ldr r7, [sp, #136] @ 4-byte Reload
+ umull r0, r12, r5, r9
+ umull r6, r4, r5, r7
+ adcs r2, r6, r2
+ adcs r0, r0, r3
+ umull r3, r6, r5, lr
+ adcs r3, r3, r11
+ adc r5, r10, #0
+ adds r1, r2, r1
+ adcs r0, r0, r4
+ adcs r2, r3, r12
+ adc r3, r5, r6
+ subs r4, r1, r8
+ sbcs r7, r0, r7
+ sbcs r6, r2, r9
+ sbc r5, r3, lr
+ cmp r5, #0
+ movlt r7, r0
+ ldr r0, [sp, #132] @ 4-byte Reload
+ movlt r4, r1
+ movlt r6, r2
+ cmp r5, #0
+ movlt r5, r3
+ stm r0, {r4, r7}
+ str r6, [r0, #8]
+ str r5, [r0, #12]
+ add sp, sp, #140
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end54:
+ .size mcl_fp_montNF4L, .Lfunc_end54-mcl_fp_montNF4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed4L
+ .align 2
+ .type mcl_fp_montRed4L,%function
+mcl_fp_montRed4L: @ @mcl_fp_montRed4L
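+@ Montgomery reduction for 4 x 32-bit limbs: r0 = 4-limb result, r1 = 8-limb
+@ value to reduce, r2 = modulus p; the reduction constant is read from r2[-4].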
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #60
+ sub sp, sp, #60
+ ldr r7, [r1, #4]
+ ldr r6, [r2, #-4]
+ ldr r10, [r1]
+ ldr r3, [r2, #8]
+ ldr r8, [r2]
+ ldr r12, [r2, #4]
+ ldr r2, [r2, #12]
+ str r0, [sp, #52] @ 4-byte Spill
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r1, #8]
+ str r6, [sp, #56] @ 4-byte Spill
+ str r3, [sp, #40] @ 4-byte Spill
+ str r2, [sp, #36] @ 4-byte Spill
+ str r8, [sp, #32] @ 4-byte Spill
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r1, #12]
+ str r7, [sp, #44] @ 4-byte Spill
+ mul r7, r10, r6
+ umull r6, r5, r7, r3
+ str r5, [sp, #20] @ 4-byte Spill
+ mov r5, r3
+ umull r4, r3, r7, r8
+ mov lr, r6
+ str r4, [sp, #24] @ 4-byte Spill
+ umull r9, r4, r7, r2
+ umull r11, r2, r7, r12
+ mov r0, r3
+ adds r3, r3, r11
+ umlal r0, lr, r7, r12
+ adcs r2, r2, r6
+ ldr r6, [sp, #56] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r2, r2, r9
+ str r2, [sp, #20] @ 4-byte Spill
+ adc r2, r4, #0
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adds r4, r10, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ add r10, r1, #16
+ adcs r11, r2, r0
+ mul r4, r11, r6
+ umull r9, r0, r4, r5
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r0, r2, r4, r8
+ mov r5, r9
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ mov r7, r2
+ umlal r7, r5, r4, r12
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r1, r8, r10}
+ ldr r3, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, lr
+ ldr r3, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r3, r3, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ str r3, [sp, #48] @ 4-byte Spill
+ adcs r1, r1, r0
+ adcs r0, r8, #0
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r8, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ adcs r0, r10, #0
+ ldr r10, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ umull r1, lr, r4, r10
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r3, r0, r4, r12
+ adds r3, r2, r3
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #40] @ 4-byte Reload
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adc r1, lr, #0
+ adds r2, r2, r11
+ adcs r11, r7, r0
+ mul r3, r11, r6
+ umull r2, r0, r3, r9
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r0, r6, r3, r8
+ mov r7, r2
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ mov r4, r6
+ umlal r4, r7, r3, r12
+ adcs r0, r5, r0
+ ldr r5, [sp] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r1, r0
+ umull r1, r5, r3, r10
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ umull lr, r0, r3, r12
+ adds r3, r6, lr
+ mov lr, r8
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ ldr r3, [sp, #44] @ 4-byte Reload
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ adc r1, r5, #0
+ adds r2, r2, r11
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r2, r4, r2
+ adcs r3, r7, r3
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r3
+ mov r3, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mul r5, r2, r0
+ umull r4, r0, r5, r12
+ umull r8, r6, r5, lr
+ adds r4, r6, r4
+ umull r1, r4, r5, r3
+ adcs r0, r0, r1
+ umlal r6, r1, r5, r12
+ umull r0, r7, r5, r10
+ adcs r0, r4, r0
+ ldr r4, [sp, #44] @ 4-byte Reload
+ adc r5, r7, #0
+ adds r2, r8, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r2, r6, r2
+ adcs r1, r1, r4
+ ldr r4, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ adcs r9, r5, r4
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adc r8, r4, #0
+ subs r6, r2, lr
+ sbcs r5, r1, r12
+ sbcs r4, r0, r3
+ sbcs r7, r9, r10
+ sbc r3, r8, #0
+ ands r3, r3, #1
+ movne r4, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ movne r6, r2
+ movne r5, r1
+ cmp r3, #0
+ movne r7, r9
+ str r6, [r0]
+ str r5, [r0, #4]
+ str r4, [r0, #8]
+ str r7, [r0, #12]
+ add sp, sp, #60
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end55:
+ .size mcl_fp_montRed4L, .Lfunc_end55-mcl_fp_montRed4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre4L
+ .align 2
+ .type mcl_fp_addPre4L,%function
+mcl_fp_addPre4L: @ @mcl_fp_addPre4L
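+@ 4-limb addition without reduction: r0[0..3] = r1[0..3] + r2[0..3]; the carry
+@ is returned in r0.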
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, lr}
+ push {r4, r5, r6, lr}
+ ldm r1, {r3, r12, lr}
+ ldr r1, [r1, #12]
+ ldm r2, {r4, r5, r6}
+ ldr r2, [r2, #12]
+ adds r3, r4, r3
+ adcs r5, r5, r12
+ adcs r6, r6, lr
+ adcs r1, r2, r1
+ stm r0, {r3, r5, r6}
+ str r1, [r0, #12]
+ mov r0, #0
+ adc r0, r0, #0
+ pop {r4, r5, r6, lr}
+ mov pc, lr
+.Lfunc_end56:
+ .size mcl_fp_addPre4L, .Lfunc_end56-mcl_fp_addPre4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subPre4L
+ .align 2
+ .type mcl_fp_subPre4L,%function
+mcl_fp_subPre4L: @ @mcl_fp_subPre4L
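+@ 4-limb subtraction without reduction: r0[0..3] = r1[0..3] - r2[0..3]; the
+@ borrow is returned in r0.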
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, lr}
+ push {r4, r5, r6, lr}
+ ldm r2, {r3, r12, lr}
+ ldr r2, [r2, #12]
+ ldm r1, {r4, r5, r6}
+ ldr r1, [r1, #12]
+ subs r3, r4, r3
+ sbcs r5, r5, r12
+ sbcs r6, r6, lr
+ sbcs r1, r1, r2
+ stm r0, {r3, r5, r6}
+ str r1, [r0, #12]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ pop {r4, r5, r6, lr}
+ mov pc, lr
+.Lfunc_end57:
+ .size mcl_fp_subPre4L, .Lfunc_end57-mcl_fp_subPre4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_4L
+ .align 2
+ .type mcl_fp_shr1_4L,%function
+mcl_fp_shr1_4L: @ @mcl_fp_shr1_4L
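+@ Logical right shift by one bit of the 4-limb value at r1, stored at r0.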
+ .fnstart
+@ BB#0:
+ .save {r11, lr}
+ push {r11, lr}
+ ldr r3, [r1, #4]
+ ldr r12, [r1]
+ ldr lr, [r1, #12]
+ ldr r2, [r1, #8]
+ lsrs r1, r3, #1
+ lsr r3, r3, #1
+ rrx r12, r12
+ lsrs r1, lr, #1
+ orr r3, r3, r2, lsl #31
+ rrx r1, r2
+ lsr r2, lr, #1
+ str r12, [r0]
+ str r3, [r0, #4]
+ str r1, [r0, #8]
+ str r2, [r0, #12]
+ pop {r11, lr}
+ mov pc, lr
+.Lfunc_end58:
+ .size mcl_fp_shr1_4L, .Lfunc_end58-mcl_fp_shr1_4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add4L
+ .align 2
+ .type mcl_fp_add4L,%function
+mcl_fp_add4L: @ @mcl_fp_add4L
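+@ Modular addition, 4 limbs: r1 + r2 into r0 with a conditional subtraction of
+@ the modulus at r3.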
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r11, lr}
+ push {r4, r5, r6, r7, r11, lr}
+ ldm r1, {r12, lr}
+ ldr r4, [r1, #8]
+ ldr r1, [r1, #12]
+ ldm r2, {r5, r6, r7}
+ ldr r2, [r2, #12]
+ adds r5, r5, r12
+ adcs r6, r6, lr
+ adcs r7, r7, r4
+ stm r0, {r5, r6, r7}
+ adcs r4, r2, r1
+ mov r1, #0
+ ldr r2, [r3]
+ adc lr, r1, #0
+ str r4, [r0, #12]
+ ldmib r3, {r1, r12}
+ ldr r3, [r3, #12]
+ subs r5, r5, r2
+ sbcs r2, r6, r1
+ sbcs r1, r7, r12
+ sbcs r12, r4, r3
+ sbc r3, lr, #0
+ tst r3, #1
+ streq r5, [r0]
+ streq r2, [r0, #4]
+ streq r1, [r0, #8]
+ streq r12, [r0, #12]
+ pop {r4, r5, r6, r7, r11, lr}
+ mov pc, lr
+.Lfunc_end59:
+ .size mcl_fp_add4L, .Lfunc_end59-mcl_fp_add4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF4L
+ .align 2
+ .type mcl_fp_addNF4L,%function
+mcl_fp_addNF4L: @ @mcl_fp_addNF4L
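+@ Modular addition, 4 limbs (NF variant): result selected by the sign of x+y-p.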
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ ldm r1, {r12, lr}
+ ldr r4, [r1, #8]
+ ldr r1, [r1, #12]
+ ldm r2, {r5, r6, r7}
+ ldr r2, [r2, #12]
+ adds r5, r5, r12
+ adcs r6, r6, lr
+ adcs r7, r7, r4
+ adc r8, r2, r1
+ ldm r3, {r2, r4, r12, lr}
+ subs r2, r5, r2
+ sbcs r4, r6, r4
+ sbcs r3, r7, r12
+ sbc r1, r8, lr
+ cmp r1, #0
+ movlt r2, r5
+ movlt r4, r6
+ movlt r3, r7
+ cmp r1, #0
+ movlt r1, r8
+ stm r0, {r2, r4}
+ str r3, [r0, #8]
+ str r1, [r0, #12]
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end60:
+ .size mcl_fp_addNF4L, .Lfunc_end60-mcl_fp_addNF4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sub4L
+ .align 2
+ .type mcl_fp_sub4L,%function
+mcl_fp_sub4L: @ @mcl_fp_sub4L
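+@ Modular subtraction, 4 limbs: r1 - r2 into r0; the modulus at r3 is added
+@ back on borrow.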
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ ldm r2, {r12, lr}
+ ldr r4, [r2, #8]
+ ldr r5, [r2, #12]
+ ldm r1, {r2, r6, r7}
+ ldr r1, [r1, #12]
+ subs r8, r2, r12
+ sbcs r2, r6, lr
+ str r8, [r0]
+ sbcs r12, r7, r4
+ sbcs lr, r1, r5
+ mov r1, #0
+ sbc r1, r1, #0
+ stmib r0, {r2, r12, lr}
+ tst r1, #1
+ popeq {r4, r5, r6, r7, r8, lr}
+ moveq pc, lr
+ ldm r3, {r1, r4, r5}
+ ldr r3, [r3, #12]
+ adds r1, r1, r8
+ adcs r2, r4, r2
+ adcs r7, r5, r12
+ adc r3, r3, lr
+ stm r0, {r1, r2, r7}
+ str r3, [r0, #12]
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end61:
+ .size mcl_fp_sub4L, .Lfunc_end61-mcl_fp_sub4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF4L
+ .align 2
+ .type mcl_fp_subNF4L,%function
+mcl_fp_subNF4L: @ @mcl_fp_subNF4L
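+@ Modular subtraction, 4 limbs (NF variant): result selected by the sign of x-y.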
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ ldm r2, {r12, lr}
+ ldr r4, [r2, #8]
+ ldr r2, [r2, #12]
+ ldm r1, {r5, r6, r7}
+ ldr r1, [r1, #12]
+ subs r5, r5, r12
+ sbcs r6, r6, lr
+ sbcs r8, r7, r4
+ sbc r1, r1, r2
+ ldm r3, {r2, r4, r12, lr}
+ adds r2, r5, r2
+ adcs r4, r6, r4
+ adcs r3, r8, r12
+ adc r7, r1, lr
+ cmp r1, #0
+ movge r2, r5
+ movge r4, r6
+ movge r3, r8
+ cmp r1, #0
+ movge r7, r1
+ stm r0, {r2, r4}
+ str r3, [r0, #8]
+ str r7, [r0, #12]
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end62:
+ .size mcl_fp_subNF4L, .Lfunc_end62-mcl_fp_subNF4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add4L
+ .align 2
+ .type mcl_fpDbl_add4L,%function
+mcl_fpDbl_add4L: @ @mcl_fpDbl_add4L
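+@ Double-width (8-limb) addition: the low 4 limbs are stored as is; the high 4
+@ limbs are conditionally reduced by the modulus at r3.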
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldm r1, {r8, r9, r10, r11}
+ ldr r7, [r1, #16]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r1, #20]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r1, #24]
+ ldr r1, [r1, #28]
+ str r7, [sp, #8] @ 4-byte Spill
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r2, {r1, r6, r7, r12, lr}
+ ldr r4, [r2, #20]
+ ldr r5, [r2, #24]
+ ldr r2, [r2, #28]
+ adds r1, r1, r8
+ adcs r6, r6, r9
+ adcs r7, r7, r10
+ adcs r12, r12, r11
+ stm r0, {r1, r6, r7, r12}
+ mov r1, #0
+ ldr r7, [sp] @ 4-byte Reload
+ ldr r6, [sp, #4] @ 4-byte Reload
+ adcs r7, lr, r7
+ adcs r6, r4, r6
+ ldr r4, [sp, #8] @ 4-byte Reload
+ adcs r8, r5, r4
+ ldr r5, [sp, #12] @ 4-byte Reload
+ ldr r4, [r3]
+ adcs lr, r2, r5
+ adc r12, r1, #0
+ ldmib r3, {r1, r2, r3}
+ subs r4, r7, r4
+ sbcs r1, r6, r1
+ sbcs r2, r8, r2
+ sbcs r3, lr, r3
+ sbc r5, r12, #0
+ ands r5, r5, #1
+ movne r4, r7
+ movne r1, r6
+ movne r2, r8
+ cmp r5, #0
+ movne r3, lr
+ str r4, [r0, #16]
+ str r1, [r0, #20]
+ str r2, [r0, #24]
+ str r3, [r0, #28]
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end63:
+ .size mcl_fpDbl_add4L, .Lfunc_end63-mcl_fpDbl_add4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub4L
+ .align 2
+ .type mcl_fpDbl_sub4L,%function
+mcl_fpDbl_sub4L: @ @mcl_fpDbl_sub4L
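+@ Double-width (8-limb) subtraction r1 - r2: the modulus at r3 is added back
+@ into the high 4 limbs on borrow.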
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldm r2, {r8, r9, r10, r11}
+ ldr r7, [r2, #16]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ ldr r2, [r2, #28]
+ str r7, [sp, #8] @ 4-byte Spill
+ str r2, [sp, #12] @ 4-byte Spill
+ ldm r1, {r2, r6, r7, r12, lr}
+ ldr r4, [r1, #20]
+ ldr r5, [r1, #24]
+ ldr r1, [r1, #28]
+ subs r2, r2, r8
+ str r2, [r0]
+ sbcs r2, r6, r9
+ ldr r6, [sp, #4] @ 4-byte Reload
+ str r2, [r0, #4]
+ sbcs r2, r7, r10
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #8]
+ sbcs r2, r12, r11
+ str r2, [r0, #12]
+ mov r2, #0
+ sbcs r7, lr, r7
+ sbcs r6, r4, r6
+ ldr r4, [sp, #8] @ 4-byte Reload
+ sbcs r5, r5, r4
+ ldr r4, [sp, #12] @ 4-byte Reload
+ sbcs lr, r1, r4
+ ldr r4, [r3]
+ ldr r1, [r3, #8]
+ sbc r12, r2, #0
+ ldr r2, [r3, #4]
+ ldr r3, [r3, #12]
+ adds r4, r7, r4
+ adcs r2, r6, r2
+ adcs r1, r5, r1
+ adc r3, lr, r3
+ ands r12, r12, #1
+ moveq r4, r7
+ moveq r2, r6
+ moveq r1, r5
+ cmp r12, #0
+ moveq r3, lr
+ str r4, [r0, #16]
+ str r2, [r0, #20]
+ str r1, [r0, #24]
+ str r3, [r0, #28]
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end64:
+ .size mcl_fpDbl_sub4L, .Lfunc_end64-mcl_fpDbl_sub4L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre5L
+ .align 2
+ .type mcl_fp_mulUnitPre5L,%function
+mcl_fp_mulUnitPre5L: @ @mcl_fp_mulUnitPre5L
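+@ Multiplies the 5-limb value at r1 by the 32-bit word r2 and stores the 6-limb
+@ product at r0.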
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r10, [r1, #12]
+ ldr r8, [r1, #16]
+ umull r4, r9, lr, r2
+ umull r1, r6, r12, r2
+ mov r7, r6
+ mov r5, r4
+ umlal r7, r5, r3, r2
+ stm r0, {r1, r7}
+ str r5, [r0, #8]
+ umull r5, r7, r3, r2
+ umull r1, r12, r10, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r1, r9, r1
+ str r1, [r0, #12]
+ umull r1, r3, r8, r2
+ adcs r1, r12, r1
+ str r1, [r0, #16]
+ adc r1, r3, #0
+ str r1, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, lr}
+ mov pc, lr
+.Lfunc_end65:
+ .size mcl_fp_mulUnitPre5L, .Lfunc_end65-mcl_fp_mulUnitPre5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre5L
+ .align 2
+ .type mcl_fpDbl_mulPre5L,%function
+mcl_fpDbl_mulPre5L: @ @mcl_fpDbl_mulPre5L
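+@ Full 5 x 5-limb multiplication: stores the 10-limb product of the values at
+@ r1 and r2 into r0, with no modular reduction.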
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #36
+ sub sp, sp, #36
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2]
+ ldm r1, {r12, lr}
+ ldr r9, [r1, #8]
+ ldr r10, [r1, #12]
+ umull r5, r4, r12, r3
+ umull r6, r7, lr, r3
+ adds r6, r4, r6
+ str r5, [sp, #24] @ 4-byte Spill
+ umull r5, r6, r9, r3
+ adcs r7, r7, r5
+ umlal r4, r5, lr, r3
+ umull r7, r11, r10, r3
+ adcs r6, r6, r7
+ ldr r7, [r1, #16]
+ str r6, [sp, #28] @ 4-byte Spill
+ umull r6, r8, r7, r3
+ ldr r3, [sp, #24] @ 4-byte Reload
+ adcs r11, r11, r6
+ ldr r6, [r2, #4]
+ str r3, [r0]
+ umull r3, r2, r12, r6
+ adc r12, r8, #0
+ adds r8, r3, r4
+ str r2, [sp, #24] @ 4-byte Spill
+ umull r3, r2, lr, r6
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r5, r3, r5
+ umull r3, lr, r10, r6
+ umull r4, r10, r9, r6
+ str r8, [r0, #4]
+ adcs r4, r4, r2
+ umull r2, r9, r7, r6
+ adcs r3, r3, r11
+ adcs r7, r2, r12
+ mov r2, #0
+ adc r6, r2, #0
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adds r5, r5, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r11, r4, r2
+ adcs r2, r3, r10
+ ldr r3, [sp, #32] @ 4-byte Reload
+ str r2, [sp, #16] @ 4-byte Spill
+ adcs r2, r7, lr
+ ldr r7, [r1]
+ str r2, [sp, #8] @ 4-byte Spill
+ adc r2, r6, r9
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [r3, #8]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldmib r1, {r8, lr}
+ ldr r6, [r1, #12]
+ umull r12, r4, r7, r2
+ adds r7, r12, r5
+ str r4, [sp, #12] @ 4-byte Spill
+ ldr r12, [r1, #16]
+ str r7, [sp, #20] @ 4-byte Spill
+ umull r5, r7, r8, r2
+ str r7, [sp, #4] @ 4-byte Spill
+ adcs r10, r5, r11
+ umull r5, r7, lr, r2
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adcs r9, r5, r7
+ umull r4, r7, r6, r2
+ mov r5, #0
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r4, r4, r7
+ umull r11, r7, r12, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r2, r11, r2
+ adc r11, r5, #0
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adds r5, r10, r5
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [sp, #4] @ 4-byte Reload
+ adcs r5, r9, r5
+ str r5, [sp, #8] @ 4-byte Spill
+ ldr r5, [sp] @ 4-byte Reload
+ adcs r4, r4, r5
+ ldr r5, [sp, #16] @ 4-byte Reload
+ adcs r10, r2, r5
+ adc r2, r11, r7
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r2, [r0, #8]
+ ldr r2, [r3, #12]
+ umull r11, r3, r6, r2
+ str r3, [sp, #20] @ 4-byte Spill
+ umull r6, r3, lr, r2
+ umull lr, r9, r8, r2
+ str r3, [sp, #24] @ 4-byte Spill
+ ldr r3, [sp, #28] @ 4-byte Reload
+ umull r7, r8, r3, r2
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adds r3, r7, r3
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adcs r5, lr, r3
+ mov r3, #0
+ adcs r6, r6, r4
+ umull r4, lr, r12, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r7, r11, r10
+ adcs r2, r4, r2
+ adc r3, r3, #0
+ adds r10, r5, r8
+ adcs r11, r6, r9
+ ldr r6, [sp, #24] @ 4-byte Reload
+ adcs r7, r7, r6
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r2, r2, r7
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r2, [r0, #12]
+ adc r2, r3, lr
+ ldr r3, [r1]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [sp, #32] @ 4-byte Reload
+ ldr r4, [r2, #16]
+ ldmib r1, {r2, r5, r6}
+ ldr r1, [r1, #16]
+ umull lr, r9, r6, r4
+ umull r6, r8, r5, r4
+ umull r5, r7, r2, r4
+ umull r2, r12, r3, r4
+ adds r10, r2, r10
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r3, r5, r11
+ str r10, [r0, #16]
+ adcs r5, r6, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r6, lr, r2
+ umull r2, lr, r1, r4
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r2, r1
+ mov r2, #0
+ adc r2, r2, #0
+ adds r3, r3, r12
+ adcs r7, r5, r7
+ str r3, [r0, #20]
+ adcs r6, r6, r8
+ str r7, [r0, #24]
+ adcs r1, r1, r9
+ str r6, [r0, #28]
+ adc r2, r2, lr
+ str r1, [r0, #32]
+ str r2, [r0, #36]
+ add sp, sp, #36
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end66:
+ .size mcl_fpDbl_mulPre5L, .Lfunc_end66-mcl_fpDbl_mulPre5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre5L
+ .align 2
+ .type mcl_fpDbl_sqrPre5L,%function
+mcl_fpDbl_sqrPre5L: @ @mcl_fpDbl_sqrPre5L
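+@ Full squaring: stores the 10-limb square of the 5-limb value at r1 into r0.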
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #32
+ sub sp, sp, #32
+ ldm r1, {r2, r3, r12}
+ ldr lr, [r1, #16]
+ ldr r9, [r1, #12]
+ umull r5, r6, r2, r2
+ umull r7, r11, r3, r2
+ str r5, [r0]
+ umull r5, r4, lr, r2
+ adds r8, r6, r7
+ str r5, [sp, #24] @ 4-byte Spill
+ umull r5, r10, r12, r2
+ str r4, [sp, #28] @ 4-byte Spill
+ adcs r4, r11, r5
+ umlal r6, r5, r3, r2
+ umull r4, r8, r9, r2
+ adcs r10, r10, r4
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adcs r8, r8, r4
+ ldr r4, [sp, #28] @ 4-byte Reload
+ adc r4, r4, #0
+ str r4, [sp, #24] @ 4-byte Spill
+ umull r2, r4, r3, r3
+ str r4, [sp, #28] @ 4-byte Spill
+ adds r4, r7, r6
+ str r4, [sp, #16] @ 4-byte Spill
+ adcs r5, r2, r5
+ umull r2, r4, r12, r3
+ str r4, [sp, #12] @ 4-byte Spill
+ adcs r4, r2, r10
+ umull r2, r6, r9, r3
+ adcs r2, r2, r8
+ umull r7, r8, lr, r3
+ ldr r3, [sp, #24] @ 4-byte Reload
+ adcs r7, r7, r3
+ mov r3, #0
+ adc r3, r3, #0
+ adds r5, r5, r11
+ str r5, [sp, #24] @ 4-byte Spill
+ ldr r5, [sp, #28] @ 4-byte Reload
+ adcs r4, r4, r5
+ str r4, [sp, #20] @ 4-byte Spill
+ ldr r4, [sp, #16] @ 4-byte Reload
+ str r4, [r0, #4]
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adcs r2, r2, r4
+ str r2, [sp, #12] @ 4-byte Spill
+ adcs r2, r7, r6
+ str r2, [sp, #8] @ 4-byte Spill
+ adc r2, r3, r8
+ str r2, [sp, #4] @ 4-byte Spill
+ umull r11, r2, lr, r12
+ umull lr, r10, r12, r12
+ str r2, [sp, #28] @ 4-byte Spill
+ ldm r1, {r4, r6}
+ ldr r2, [r1, #12]
+ ldr r7, [sp, #24] @ 4-byte Reload
+ umull r8, r3, r2, r12
+ str r3, [sp, #16] @ 4-byte Spill
+ umull r5, r3, r6, r12
+ str r3, [sp] @ 4-byte Spill
+ umull r3, r9, r4, r12
+ adds r3, r3, r7
+ str r3, [sp, #24] @ 4-byte Spill
+ ldr r3, [sp, #20] @ 4-byte Reload
+ adcs r5, r5, r3
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adcs r12, lr, r3
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adcs r7, r8, r3
+ ldr r3, [sp, #4] @ 4-byte Reload
+ adcs lr, r11, r3
+ mov r3, #0
+ adc r11, r3, #0
+ ldr r3, [sp] @ 4-byte Reload
+ adds r5, r5, r9
+ adcs r12, r12, r3
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adcs r9, r7, r10
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r8, lr, r3
+ adc r11, r11, r7
+ umull r7, r3, r4, r2
+ adds r7, r7, r5
+ str r3, [sp, #20] @ 4-byte Spill
+ umull r5, r3, r6, r2
+ ldr r6, [r1, #8]
+ str r3, [sp, #16] @ 4-byte Spill
+ adcs r10, r5, r12
+ ldr r3, [sp, #24] @ 4-byte Reload
+ ldr r5, [r1, #16]
+ str r7, [sp, #28] @ 4-byte Spill
+ umull r4, lr, r6, r2
+ adcs r12, r4, r9
+ ldr r4, [sp, #20] @ 4-byte Reload
+ umull r7, r9, r2, r2
+ str r3, [r0, #8]
+ adcs r7, r7, r8
+ umull r3, r8, r5, r2
+ adcs r2, r3, r11
+ mov r3, #0
+ adc r3, r3, #0
+ adds r11, r10, r4
+ ldr r4, [sp, #16] @ 4-byte Reload
+ adcs r4, r12, r4
+ adcs r10, r7, lr
+ adcs r12, r2, r9
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adc r8, r3, r8
+ ldr r3, [r1]
+ str r2, [r0, #12]
+ ldr r2, [r1, #4]
+ ldr r1, [r1, #12]
+ umull r7, r9, r3, r5
+ adds lr, r7, r11
+ str lr, [r0, #16]
+ umull r7, r11, r2, r5
+ adcs r2, r7, r4
+ umull r4, r7, r6, r5
+ adcs r4, r4, r10
+ umull r6, r10, r1, r5
+ adcs r1, r6, r12
+ umull r6, r3, r5, r5
+ mov r5, #0
+ adcs r6, r6, r8
+ adc r5, r5, #0
+ adds r2, r2, r9
+ adcs r4, r4, r11
+ str r2, [r0, #20]
+ adcs r1, r1, r7
+ str r4, [r0, #24]
+ adcs r7, r6, r10
+ str r1, [r0, #28]
+ adc r3, r5, r3
+ str r7, [r0, #32]
+ str r3, [r0, #36]
+ add sp, sp, #32
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end67:
+ .size mcl_fpDbl_sqrPre5L, .Lfunc_end67-mcl_fpDbl_sqrPre5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont5L
+ .align 2
+ .type mcl_fp_mont5L,%function
+mcl_fp_mont5L: @ @mcl_fp_mont5L
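+@ Montgomery multiplication, 5 limbs: r0 = result, r1 = x, r2 = y, r3 = modulus;
+@ the reduction constant is read from r3[-4].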
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #100
+ sub sp, sp, #100
+ str r0, [sp, #52] @ 4-byte Spill
+ mov r0, r2
+ str r2, [sp, #48] @ 4-byte Spill
+ ldm r0, {r2, r8}
+ ldr r7, [r0, #8]
+ ldr r0, [r0, #12]
+ ldr r6, [r3, #-4]
+ ldr r5, [r3, #8]
+ ldr r9, [r3]
+ ldr r11, [r1, #8]
+ ldr r12, [r1, #12]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r1, #4]
+ ldr r1, [r1, #16]
+ str r6, [sp, #84] @ 4-byte Spill
+ str r5, [sp, #88] @ 4-byte Spill
+ str r9, [sp, #80] @ 4-byte Spill
+ str r11, [sp, #60] @ 4-byte Spill
+ str r12, [sp, #56] @ 4-byte Spill
+ umull r4, lr, r0, r2
+ str r0, [sp, #72] @ 4-byte Spill
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r3, #4]
+ str r1, [sp, #64] @ 4-byte Spill
+ mul r0, r4, r6
+ str r4, [sp, #36] @ 4-byte Spill
+ umull r6, r4, r0, r5
+ str r4, [sp, #28] @ 4-byte Spill
+ umull r4, r5, r0, r9
+ mov r10, r6
+ mov r9, r5
+ str r4, [sp, #32] @ 4-byte Spill
+ str r7, [sp, #76] @ 4-byte Spill
+ str r5, [sp, #12] @ 4-byte Spill
+ mov r4, r7
+ umlal r9, r10, r0, r7
+ umull r7, r5, r1, r2
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [sp, #96] @ 4-byte Reload
+ str r5, [sp, #92] @ 4-byte Spill
+ umull r5, r1, r12, r2
+ str r1, [sp, #20] @ 4-byte Spill
+ str r5, [sp, #24] @ 4-byte Spill
+ umull r12, r1, r11, r2
+ umull r11, r5, r7, r2
+ adds r7, lr, r11
+ adcs r5, r5, r12
+ ldr r5, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r5, r1
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ str r1, [sp, #68] @ 4-byte Spill
+ umull r7, r11, r0, r1
+ ldr r1, [r3, #12]
+ umull r3, r5, r0, r4
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adds r3, r4, r3
+ str r1, [sp, #92] @ 4-byte Spill
+ umull r3, r4, r0, r1
+ adcs r0, r5, r6
+ mov r1, #0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r3
+ adcs r3, r4, r7
+ ldr r7, [sp, #96] @ 4-byte Reload
+ ldr r4, [sp, #32] @ 4-byte Reload
+ adc r5, r11, #0
+ umlal lr, r12, r7, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adds r2, r4, r2
+ adcs r2, r9, lr
+ ldr r9, [sp, #64] @ 4-byte Reload
+ str r2, [sp, #36] @ 4-byte Spill
+ adcs r2, r10, r12
+ ldr r10, [sp, #72] @ 4-byte Reload
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r5, r0
+ umull r5, lr, r8, r9
+ str r0, [sp, #20] @ 4-byte Spill
+ adc r0, r1, #0
+ umull r6, r1, r8, r7
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ umull r12, r4, r8, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ umull r3, r2, r8, r0
+ umull r11, r0, r8, r10
+ ldr r10, [sp, #68] @ 4-byte Reload
+ adds r6, r0, r6
+ adcs r1, r1, r3
+ umlal r0, r3, r8, r7
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r1, r2, r12
+ adcs r2, r4, r5
+ adc r6, lr, #0
+ adds r8, r7, r11
+ ldr r7, [sp, #32] @ 4-byte Reload
+ adcs r11, r7, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r7, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ mul r4, r8, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ umull r6, r1, r4, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r1, [sp, #12] @ 4-byte Spill
+ umull r1, r5, r4, r0
+ mov r0, r6
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ mov r3, r5
+ umull r12, lr, r4, r1
+ umlal r3, r0, r4, r1
+ umull r1, r2, r4, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adds r5, r5, r12
+ adcs r6, lr, r6
+ umull r5, r12, r4, r10
+ adcs r1, r7, r1
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adcs r2, r2, r5
+ adc r6, r12, #0
+ adds r7, r7, r8
+ ldr r8, [sp, #60] @ 4-byte Reload
+ adcs r3, r3, r11
+ ldr r11, [sp, #72] @ 4-byte Reload
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ umull r2, r1, r0, r9
+ ldr r9, [sp, #56] @ 4-byte Reload
+ umull r3, r12, r0, r8
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r2, [sp, #4] @ 4-byte Spill
+ mov r2, r0
+ umull r4, r5, r0, r9
+ umull r6, r7, r0, r1
+ umull lr, r0, r2, r11
+ adds r6, r0, r6
+ str lr, [sp, #8] @ 4-byte Spill
+ adcs r6, r7, r3
+ ldr r7, [sp, #4] @ 4-byte Reload
+ umlal r0, r3, r2, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r12, r12, r4
+ adcs r4, r5, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adc r7, r7, #0
+ adds r2, r1, r2
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r2, [sp] @ 4-byte Spill
+ adcs r0, r1, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ mul r4, r2, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ umull r5, r1, r4, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r1, [sp, #12] @ 4-byte Spill
+ mov r2, r5
+ umull r1, r7, r4, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r1, [sp, #16] @ 4-byte Spill
+ umull r6, r1, r4, r10
+ mov r3, r7
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r6, [sp, #4] @ 4-byte Spill
+ umlal r3, r2, r4, r0
+ umull r12, lr, r4, r1
+ umull r10, r1, r4, r0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adds r4, r7, r10
+ adcs r1, r1, r5
+ ldr r4, [sp, #64] @ 4-byte Reload
+ ldr r1, [sp] @ 4-byte Reload
+ adcs r10, r0, r12
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r12, lr, r0
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc lr, r0, #0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adds r6, r0, r1
+ ldr r0, [sp, #44] @ 4-byte Reload
+ umull r5, r1, r0, r4
+ mov r6, r0
+ str r1, [sp, #16] @ 4-byte Spill
+ umull r4, r1, r0, r9
+ str r5, [sp, #8] @ 4-byte Spill
+ umull r5, r9, r0, r8
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r4, [sp] @ 4-byte Spill
+ umull r4, r8, r0, r1
+ umull r7, r0, r6, r11
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [sp, #40] @ 4-byte Reload
+ adcs r11, r3, r7
+ ldr r3, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ str r2, [sp, #40] @ 4-byte Spill
+ adcs r10, r10, r3
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r12, r12, r3
+ ldr r3, [sp, #24] @ 4-byte Reload
+ adcs r7, lr, r3
+ ldr r3, [sp, #20] @ 4-byte Reload
+ adc r2, r3, #0
+ adds r4, r0, r4
+ ldr r3, [sp, #4] @ 4-byte Reload
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [sp] @ 4-byte Reload
+ adcs r4, r8, r5
+ umlal r0, r5, r6, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r4, r9, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r3, r3, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adc r8, r2, #0
+ adds lr, r11, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r9, r10, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r10, [sp, #92] @ 4-byte Reload
+ adcs r0, r12, r4
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ mul r4, lr, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ umull r12, r3, r4, r1
+ umull r7, r11, r4, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ umull r8, r6, r4, r0
+ mov r0, r7
+ mov r5, r6
+ adds r6, r6, r12
+ umlal r5, r0, r4, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r3, r3, r7
+ umull r6, r12, r4, r1
+ umull r1, r2, r4, r10
+ adcs r1, r11, r1
+ adcs r2, r2, r6
+ adc r3, r12, #0
+ adds r7, r8, lr
+ ldr r7, [sp, #44] @ 4-byte Reload
+ adcs r7, r5, r7
+ adcs r0, r0, r9
+ ldr r9, [sp, #72] @ 4-byte Reload
+ str r7, [sp, #44] @ 4-byte Spill
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r5, [r0, #16]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ umull r4, r8, r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ umull r7, r1, r5, r2
+ umull r12, lr, r5, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ umull r6, r3, r5, r0
+ umull r11, r0, r5, r9
+ ldr r9, [sp, #76] @ 4-byte Reload
+ adds r7, r0, r7
+ adcs r1, r1, r6
+ umlal r0, r6, r5, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adcs r1, r3, r12
+ ldr r12, [sp, #80] @ 4-byte Reload
+ adcs r4, lr, r4
+ ldr lr, [sp, #88] @ 4-byte Reload
+ adc r3, r8, #0
+ adds r7, r2, r11
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r11, r0, r6
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ mul r4, r7, r0
+ umull r0, r1, r4, r9
+ umull r8, r3, r4, r12
+ adds r0, r3, r0
+ umull r5, r0, r4, lr
+ adcs r1, r1, r5
+ umlal r3, r5, r4, r9
+ umull r1, r6, r4, r10
+ adcs r10, r0, r1
+ umull r1, r0, r4, r2
+ mov r4, r9
+ adcs r1, r6, r1
+ ldr r6, [sp, #96] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r8, r7
+ adcs r3, r3, r6
+ adcs r7, r5, r11
+ ldr r5, [sp, #72] @ 4-byte Reload
+ adcs r11, r10, r5
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs r8, r1, r5
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adc r9, r0, #0
+ subs r5, r3, r12
+ sbcs r4, r7, r4
+ sbcs r0, r11, lr
+ sbcs r6, r8, r1
+ sbcs r1, r10, r2
+ sbc r2, r9, #0
+ ands r2, r2, #1
+ movne r5, r3
+ ldr r3, [sp, #52] @ 4-byte Reload
+ movne r4, r7
+ movne r0, r11
+ cmp r2, #0
+ movne r6, r8
+ movne r1, r10
+ str r5, [r3]
+ str r4, [r3, #4]
+ str r0, [r3, #8]
+ str r6, [r3, #12]
+ str r1, [r3, #16]
+ add sp, sp, #100
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end68:
+ .size mcl_fp_mont5L, .Lfunc_end68-mcl_fp_mont5L
+ .cantunwind
+ .fnend
+
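+@ mcl_fp_montNF5L: 5-limb Montgomery multiplication, "NF" variant of
+@ mcl_fp_mont5L above; the final correction uses a signed select
+@ (asr/movlt) instead of a carry-based conditional subtraction.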
+ .globl mcl_fp_montNF5L
+ .align 2
+ .type mcl_fp_montNF5L,%function
+mcl_fp_montNF5L: @ @mcl_fp_montNF5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #76
+ sub sp, sp, #76
+ str r2, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r2, {r4, r9, r10}
+ ldr r6, [r1, #4]
+ ldr r0, [r2, #12]
+ ldr r7, [r1]
+ ldr r5, [r1, #8]
+ ldr lr, [r3, #8]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #12]
+ str r6, [sp, #32] @ 4-byte Spill
+ umull r2, r8, r6, r4
+ mov r11, r6
+ umull r6, r12, r7, r4
+ str r7, [sp, #56] @ 4-byte Spill
+ str r5, [sp, #48] @ 4-byte Spill
+ str lr, [sp, #36] @ 4-byte Spill
+ adds r7, r12, r2
+ umull r2, r7, r5, r4
+ adcs r5, r8, r2
+ umlal r12, r2, r11, r4
+ umull r5, r8, r0, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ adcs r0, r7, r5
+ ldr r5, [r3, #4]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ str r5, [sp, #60] @ 4-byte Spill
+ umull r1, r7, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ adcs r0, r8, r1
+ ldr r1, [r3]
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r7, #0
+ ldr r7, [r3, #-4]
+ str r0, [sp, #12] @ 4-byte Spill
+ str r1, [sp, #40] @ 4-byte Spill
+ mul r0, r6, r7
+ str r7, [sp, #72] @ 4-byte Spill
+ umull r8, r7, r0, r1
+ ldr r1, [r3, #12]
+ ldr r3, [r3, #16]
+ adds r6, r8, r6
+ umull r4, r8, r0, r5
+ str r7, [sp, #8] @ 4-byte Spill
+ umull r5, r7, r0, lr
+ ldr lr, [sp, #64] @ 4-byte Reload
+ adcs r6, r4, r12
+ adcs r5, r5, r2
+ str r1, [sp, #52] @ 4-byte Spill
+ umull r2, r4, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r3, [sp, #44] @ 4-byte Spill
+ adcs r2, r2, r1
+ umull r12, r1, r0, r3
+ ldr r0, [sp, #16] @ 4-byte Reload
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adcs r0, r12, r0
+ adc r12, r3, #0
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adds r6, r6, r3
+ adcs r3, r5, r8
+ ldr r8, [sp, #56] @ 4-byte Reload
+ adcs r2, r2, r7
+ str r3, [sp, #16] @ 4-byte Spill
+ adcs r0, r0, r4
+ umull r7, r4, r9, r11
+ str r2, [sp, #12] @ 4-byte Spill
+ str r0, [sp, #8] @ 4-byte Spill
+ adc r0, r12, r1
+ ldr r12, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ umull r5, r1, r9, r8
+ adds r7, r1, r7
+ umull r2, r7, r9, r0
+ adcs r4, r4, r2
+ umlal r1, r2, r9, r11
+ ldr r11, [sp, #44] @ 4-byte Reload
+ umull r4, r0, r9, r12
+ adcs r4, r7, r4
+ umull r7, r3, r9, lr
+ ldr r9, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ adc r3, r3, #0
+ adds r7, r5, r6
+ ldr r5, [sp, #16] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adcs r2, r2, r5
+ ldr r5, [sp, #8] @ 4-byte Reload
+ adcs r6, r4, r5
+ ldr r4, [sp, #4] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r3, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r5, r7, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ umull r4, r3, r5, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adds r7, r4, r7
+ ldr r4, [sp, #52] @ 4-byte Reload
+ str r3, [sp, #8] @ 4-byte Spill
+ umull r7, r3, r5, r0
+ adcs r1, r7, r1
+ umull r7, r0, r5, r9
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [sp, #8] @ 4-byte Reload
+ str r0, [sp] @ 4-byte Spill
+ adcs r2, r7, r2
+ umull r7, r0, r5, r4
+ adcs r6, r7, r6
+ umull r7, r4, r5, r11
+ ldr r5, [sp, #16] @ 4-byte Reload
+ adcs r7, r7, r5
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r1, r1, r3
+ ldr r3, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp] @ 4-byte Reload
+ adcs r1, r6, r1
+ adcs r0, r7, r0
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #8] @ 4-byte Spill
+ adc r11, r5, r4
+ str r0, [sp, #4] @ 4-byte Spill
+ umull r4, r0, r10, r8
+ ldr r8, [sp, #60] @ 4-byte Reload
+ umull r6, r5, r10, r7
+ adds r6, r0, r6
+ umull r1, r6, r10, r3
+ adcs r5, r5, r1
+ umlal r0, r1, r10, r7
+ umull r5, r2, r10, r12
+ adcs r12, r6, r5
+ umull r6, r5, r10, lr
+ mov lr, r7
+ adcs r2, r2, r6
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r6, r4, r6
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #8] @ 4-byte Reload
+ adcs r1, r1, r4
+ ldr r4, [sp, #4] @ 4-byte Reload
+ adcs r10, r12, r4
+ adcs r2, r2, r11
+ ldr r11, [sp, #40] @ 4-byte Reload
+ str r2, [sp, #8] @ 4-byte Spill
+ adc r2, r5, #0
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [sp, #72] @ 4-byte Reload
+ mul r7, r6, r2
+ umull r4, r2, r7, r11
+ adds r6, r4, r6
+ str r2, [sp, #12] @ 4-byte Spill
+ umull r6, r2, r7, r8
+ str r2, [sp, #4] @ 4-byte Spill
+ adcs r0, r6, r0
+ umull r6, r2, r7, r9
+ ldr r9, [sp, #52] @ 4-byte Reload
+ adcs r1, r6, r1
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [sp, #8] @ 4-byte Reload
+ umull r6, r12, r7, r9
+ adcs r5, r6, r10
+ ldr r10, [sp, #44] @ 4-byte Reload
+ umull r6, r4, r7, r10
+ adcs r7, r6, r2
+ ldr r6, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ adc r6, r6, #0
+ adds r0, r0, r2
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #8] @ 4-byte Spill
+ adcs r0, r7, r12
+ ldr r7, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #4] @ 4-byte Spill
+ adc r0, r6, r4
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ umull r1, r5, r7, r3
+ mov r6, r1
+ umull r4, r2, r7, r0
+ mov r0, lr
+ mov r12, r2
+ umull r3, lr, r7, r0
+ umlal r12, r6, r7, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adds r2, r2, r3
+ adcs r1, lr, r1
+ umull r1, r2, r7, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r1, r5, r1
+ umull r3, r5, r7, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r2, r2, r3
+ adc r3, r5, #0
+ ldr r5, [sp, #8] @ 4-byte Reload
+ adds r7, r4, r0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r12, r0
+ adcs r6, r6, r5
+ ldr r5, [sp, #4] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp] @ 4-byte Reload
+ adcs r2, r2, r5
+ str r2, [sp, #20] @ 4-byte Spill
+ adc r2, r3, #0
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [sp, #72] @ 4-byte Reload
+ mul r5, r7, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ umull r4, lr, r5, r11
+ adds r7, r4, r7
+ umull r7, r12, r5, r8
+ adcs r0, r7, r0
+ umull r7, r3, r5, r2
+ adcs r6, r7, r6
+ umull r7, r2, r5, r9
+ adcs r1, r7, r1
+ umull r7, r4, r5, r10
+ ldr r5, [sp, #20] @ 4-byte Reload
+ adcs r7, r7, r5
+ ldr r5, [sp, #16] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r0, r0, lr
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r10, r6, r12
+ adcs lr, r1, r3
+ adcs r8, r7, r2
+ adc r9, r5, r4
+ ldr r4, [sp, #32] @ 4-byte Reload
+ ldr r7, [r0, #16]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ umull r3, r11, r7, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mov r5, r3
+ umull r12, r2, r7, r0
+ umull r6, r0, r7, r4
+ mov r1, r2
+ adds r2, r2, r6
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ ldr r3, [sp, #68] @ 4-byte Reload
+ umlal r1, r5, r7, r4
+ umull r0, r2, r7, r3
+ umull r3, r4, r7, r6
+ adcs r0, r11, r0
+ adcs r2, r2, r3
+ adc r3, r4, #0
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adds r7, r12, r4
+ ldr r12, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r10
+ adcs r6, r5, lr
+ adcs r11, r0, r8
+ ldr r8, [sp, #40] @ 4-byte Reload
+ adcs r0, r2, r9
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ adc r0, r3, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r5, r7, r0
+ umull r4, r0, r5, r8
+ umull r3, lr, r5, r12
+ adds r7, r4, r7
+ ldr r4, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ adcs r1, r3, r1
+ ldr r9, [sp, #72] @ 4-byte Reload
+ umull r7, r0, r5, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r3, r7, r6
+ umull r6, r10, r5, r2
+ adcs r7, r6, r11
+ umull r6, r11, r5, r0
+ ldr r5, [sp, #68] @ 4-byte Reload
+ adcs r6, r6, r5
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r1, r1, r9
+ adcs lr, r3, lr
+ ldr r3, [sp, #56] @ 4-byte Reload
+ adcs r9, r7, r3
+ adcs r10, r6, r10
+ adc r11, r5, r11
+ subs r6, r1, r8
+ sbcs r5, lr, r12
+ sbcs r4, r9, r4
+ sbcs r7, r10, r2
+ sbc r3, r11, r0
+ asr r0, r3, #31
+ cmp r0, #0
+ movlt r6, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ movlt r5, lr
+ movlt r4, r9
+ cmp r0, #0
+ movlt r7, r10
+ movlt r3, r11
+ str r6, [r1]
+ str r5, [r1, #4]
+ str r4, [r1, #8]
+ str r7, [r1, #12]
+ str r3, [r1, #16]
+ add sp, sp, #76
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end69:
+ .size mcl_fp_montNF5L, .Lfunc_end69-mcl_fp_montNF5L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed5L
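+@ mcl_fp_montRed5L: Montgomery reduction of the 10-limb value at r1 to a
+@ 5-limb result at r0, modulo the 5-limb modulus at r2; the word at
+@ r2[-4] is apparently the Montgomery constant used in each mul step.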
+ .align 2
+ .type mcl_fp_montRed5L,%function
+mcl_fp_montRed5L: @ @mcl_fp_montRed5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #84
+ sub sp, sp, #84
+ ldr r6, [r1, #4]
+ ldr r9, [r2, #-4]
+ ldr r4, [r1]
+ ldr r8, [r2, #8]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r2]
+ ldr r10, [r2, #4]
+ str r6, [sp, #48] @ 4-byte Spill
+ ldr r6, [r1, #8]
+ mul r5, r4, r9
+ str r4, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #72] @ 4-byte Spill
+ str r9, [sp, #64] @ 4-byte Spill
+ str r8, [sp, #68] @ 4-byte Spill
+ umull lr, r4, r5, r8
+ str r4, [sp, #40] @ 4-byte Spill
+ umull r4, r3, r5, r0
+ mov r12, lr
+ str r4, [sp, #44] @ 4-byte Spill
+ ldr r4, [r2, #16]
+ ldr r2, [r2, #12]
+ mov r0, r3
+ str r6, [sp, #56] @ 4-byte Spill
+ ldr r6, [r1, #12]
+ umlal r0, r12, r5, r10
+ str r4, [sp, #76] @ 4-byte Spill
+ str r2, [sp, #80] @ 4-byte Spill
+ str r6, [sp, #52] @ 4-byte Spill
+ umull r7, r6, r5, r4
+ str r6, [sp, #28] @ 4-byte Spill
+ umull r4, r6, r5, r2
+ umull r11, r2, r5, r10
+ str r7, [sp, #32] @ 4-byte Spill
+ adds r3, r3, r11
+ ldr r11, [r1, #36]
+ adcs r2, r2, lr
+ ldr r3, [sp, #24] @ 4-byte Reload
+ add lr, r1, #16
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adcs r2, r2, r4
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r2, r6, r2
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adc r2, r2, #0
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adds r5, r3, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ ldr r3, [sp, #72] @ 4-byte Reload
+ adcs r2, r2, r0
+ mul r0, r2, r9
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r9, [r1, #28]
+ umull r6, r2, r0, r8
+ str r2, [sp, #40] @ 4-byte Spill
+ umull r2, r4, r0, r3
+ mov r5, r6
+ mov r8, r6
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #32]
+ mov r7, r4
+ umlal r7, r5, r0, r10
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm lr, {r1, r2, lr}
+ ldr r6, [sp, #56] @ 4-byte Reload
+ adcs r3, r6, r12
+ ldr r6, [sp, #52] @ 4-byte Reload
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [sp, #36] @ 4-byte Reload
+ adcs r6, r6, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ str r6, [sp, #56] @ 4-byte Spill
+ adcs r1, r1, r3
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r1, [sp, #36] @ 4-byte Spill
+ adcs r1, lr, #0
+ ldr lr, [sp, #76] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ adcs r1, r9, #0
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, #0
+ str r1, [sp, #24] @ 4-byte Spill
+ adcs r1, r11, #0
+ umull r6, r11, r0, lr
+ str r1, [sp, #20] @ 4-byte Spill
+ mov r1, #0
+ adc r1, r1, #0
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ umull r2, r3, r0, r1
+ umull r9, r1, r0, r10
+ adds r0, r4, r9
+ adcs r0, r1, r8
+ ldr r1, [sp, #44] @ 4-byte Reload
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r9, r0, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r0, r3, r6
+ ldr r6, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r11, r11, #0
+ adds r3, r1, r0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r0, r7, r0
+ mul r7, r0, r2
+ str r0, [sp, #12] @ 4-byte Spill
+ umull r8, r0, r7, r1
+ str r0, [sp, #4] @ 4-byte Spill
+ umull r3, r0, r7, r6
+ mov r12, r8
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [sp, #56] @ 4-byte Reload
+ mov r4, r0
+ umlal r4, r12, r7, r10
+ adcs r3, r5, r3
+ ldr r5, [sp, #40] @ 4-byte Reload
+ str r3, [sp] @ 4-byte Spill
+ ldr r3, [sp, #52] @ 4-byte Reload
+ adcs r3, r9, r3
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [sp, #36] @ 4-byte Reload
+ adcs r3, r5, r3
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r3, r11, r3
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r3, r3, #0
+ str r3, [sp, #44] @ 4-byte Spill
+ ldr r3, [sp, #24] @ 4-byte Reload
+ adcs r3, r3, #0
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [sp, #20] @ 4-byte Reload
+ adcs r3, r3, #0
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adc r3, r3, #0
+ str r3, [sp, #32] @ 4-byte Spill
+ umull r5, r3, r7, lr
+ ldr lr, [sp, #80] @ 4-byte Reload
+ str r3, [sp, #28] @ 4-byte Spill
+ umull r9, r3, r7, r10
+ str r5, [sp, #24] @ 4-byte Spill
+ adds r0, r0, r9
+ adcs r0, r3, r8
+ ldr r3, [sp, #8] @ 4-byte Reload
+ ldr r0, [sp, #4] @ 4-byte Reload
+ umull r5, r11, r7, lr
+ adcs r9, r0, r5
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r8, r0, #0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adds r3, r3, r0
+ ldr r0, [sp] @ 4-byte Reload
+ adcs r11, r4, r0
+ mul r7, r11, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ umull r3, r0, r7, r1
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r1, r0, r7, r6
+ mov r5, r3
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ mov r4, r0
+ umlal r4, r5, r7, r10
+ adcs r1, r12, r1
+ umull r12, r6, r7, lr
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r9, r1
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #76] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r8, r1
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, #0
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r1, #0
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #32] @ 4-byte Spill
+ umull r9, r1, r7, r2
+ str r1, [sp, #20] @ 4-byte Spill
+ umull r8, r1, r7, r10
+ adds r0, r0, r8
+ ldr r8, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r3
+ ldr r3, [sp, #20] @ 4-byte Reload
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r12
+ adcs r1, r6, r9
+ adc r7, r3, #0
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adds r3, r3, r11
+ ldr r3, [sp, #56] @ 4-byte Reload
+ adcs r12, r4, r3
+ ldr r3, [sp, #52] @ 4-byte Reload
+ adcs r3, r5, r3
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ mul r4, r12, r0
+ umull r0, r1, r4, r10
+ umull r11, r5, r4, r8
+ adds r0, r5, r0
+ umull r6, r0, r4, r7
+ adcs r1, r1, r6
+ umlal r5, r6, r4, r10
+ umull r1, r3, r4, lr
+ adcs r9, r0, r1
+ umull r1, r0, r4, r2
+ adcs r1, r3, r1
+ ldr r3, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r2, r11, r12
+ ldr r2, [sp, #56] @ 4-byte Reload
+ adcs r2, r5, r2
+ adcs r3, r6, r3
+ ldr r6, [sp, #48] @ 4-byte Reload
+ adcs lr, r9, r6
+ ldr r6, [sp, #44] @ 4-byte Reload
+ adcs r9, r1, r6
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adc r12, r0, #0
+ subs r5, r2, r8
+ sbcs r4, r3, r10
+ sbcs r0, lr, r7
+ sbcs r6, r9, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ sbcs r1, r11, r1
+ sbc r7, r12, #0
+ ands r7, r7, #1
+ movne r5, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ movne r4, r3
+ movne r0, lr
+ cmp r7, #0
+ movne r6, r9
+ movne r1, r11
+ str r5, [r2]
+ str r4, [r2, #4]
+ str r0, [r2, #8]
+ str r6, [r2, #12]
+ str r1, [r2, #16]
+ add sp, sp, #84
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end70:
+ .size mcl_fp_montRed5L, .Lfunc_end70-mcl_fp_montRed5L
+ .cantunwind
+ .fnend
+
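+@ mcl_fp_addPre5L: 5-limb (160-bit) addition with no modular reduction;
+@ stores the sum at r0 and returns the carry-out in r0
+@ (apparent layout: r0 = dst, r1 = a, r2 = b).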
+ .globl mcl_fp_addPre5L
+ .align 2
+ .type mcl_fp_addPre5L,%function
+mcl_fp_addPre5L: @ @mcl_fp_addPre5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ ldm r2, {r3, r12, lr}
+ ldr r4, [r2, #12]
+ ldr r8, [r2, #16]
+ ldm r1, {r5, r6, r7}
+ ldr r2, [r1, #12]
+ ldr r1, [r1, #16]
+ adds r3, r3, r5
+ adcs r6, r12, r6
+ adcs r7, lr, r7
+ adcs r2, r4, r2
+ stm r0, {r3, r6, r7}
+ adcs r1, r8, r1
+ str r2, [r0, #12]
+ str r1, [r0, #16]
+ mov r0, #0
+ adc r0, r0, #0
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end71:
+ .size mcl_fp_addPre5L, .Lfunc_end71-mcl_fp_addPre5L
+ .cantunwind
+ .fnend
+
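+@ mcl_fp_subPre5L: 5-limb subtraction with no modular reduction; stores
+@ the difference at r0 and returns the borrow (0 or 1) in r0.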
+ .globl mcl_fp_subPre5L
+ .align 2
+ .type mcl_fp_subPre5L,%function
+mcl_fp_subPre5L: @ @mcl_fp_subPre5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ ldm r2, {r3, r12, lr}
+ ldr r4, [r2, #12]
+ ldr r8, [r2, #16]
+ ldm r1, {r5, r6, r7}
+ ldr r2, [r1, #12]
+ ldr r1, [r1, #16]
+ subs r3, r5, r3
+ sbcs r6, r6, r12
+ sbcs r7, r7, lr
+ sbcs r2, r2, r4
+ stm r0, {r3, r6, r7}
+ sbcs r1, r1, r8
+ str r2, [r0, #12]
+ str r1, [r0, #16]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end72:
+ .size mcl_fp_subPre5L, .Lfunc_end72-mcl_fp_subPre5L
+ .cantunwind
+ .fnend
+
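+@ mcl_fp_shr1_5L: logical right shift of the 5-limb value at r1 by one
+@ bit, result stored at r0 (the lsrs/rrx pairs carry the shifted-out bits
+@ down into the lower limbs).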
+ .globl mcl_fp_shr1_5L
+ .align 2
+ .type mcl_fp_shr1_5L,%function
+mcl_fp_shr1_5L: @ @mcl_fp_shr1_5L
+ .fnstart
+@ BB#0:
+ .save {r4, lr}
+ push {r4, lr}
+ ldr r3, [r1, #4]
+ ldr r12, [r1]
+ ldr lr, [r1, #12]
+ ldr r2, [r1, #8]
+ ldr r1, [r1, #16]
+ lsrs r4, r3, #1
+ lsr r3, r3, #1
+ rrx r12, r12
+ lsrs r4, lr, #1
+ orr r3, r3, r2, lsl #31
+ lsr r4, lr, #1
+ rrx r2, r2
+ str r12, [r0]
+ str r3, [r0, #4]
+ orr r4, r4, r1, lsl #31
+ lsr r1, r1, #1
+ str r2, [r0, #8]
+ str r4, [r0, #12]
+ str r1, [r0, #16]
+ pop {r4, lr}
+ mov pc, lr
+.Lfunc_end73:
+ .size mcl_fp_shr1_5L, .Lfunc_end73-mcl_fp_shr1_5L
+ .cantunwind
+ .fnend
+
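+@ mcl_fp_add5L: modular addition over the 5-limb modulus p at r3; the raw
+@ sum a+b is stored first, then conditionally overwritten with a+b-p when
+@ that value does not go negative (the stmeq pair at the end).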
+ .globl mcl_fp_add5L
+ .align 2
+ .type mcl_fp_add5L,%function
+mcl_fp_add5L: @ @mcl_fp_add5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldm r2, {r12, lr}
+ ldr r9, [r2, #8]
+ ldr r5, [r2, #12]
+ ldr r8, [r2, #16]
+ ldm r1, {r6, r7}
+ ldr r2, [r1, #8]
+ ldr r4, [r1, #12]
+ ldr r1, [r1, #16]
+ adds r6, r12, r6
+ adcs r7, lr, r7
+ adcs r2, r9, r2
+ stm r0, {r6, r7}
+ adcs r5, r5, r4
+ mov r4, #0
+ str r2, [r0, #8]
+ adcs r1, r8, r1
+ str r5, [r0, #12]
+ str r1, [r0, #16]
+ adc r8, r4, #0
+ ldm r3, {r4, r12, lr}
+ ldr r9, [r3, #12]
+ ldr r3, [r3, #16]
+ subs r6, r6, r4
+ sbcs r7, r7, r12
+ sbcs r2, r2, lr
+ sbcs r12, r5, r9
+ sbcs lr, r1, r3
+ sbc r1, r8, #0
+ tst r1, #1
+ stmeq r0!, {r6, r7}
+ stmeq r0, {r2, r12, lr}
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end74:
+ .size mcl_fp_add5L, .Lfunc_end74-mcl_fp_add5L
+ .cantunwind
+ .fnend
+
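+@ mcl_fp_addNF5L: modular addition, NF variant; computes a+b-p and picks
+@ either a+b or a+b-p by the sign of the top word (movlt selects).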
+ .globl mcl_fp_addNF5L
+ .align 2
+ .type mcl_fp_addNF5L,%function
+mcl_fp_addNF5L: @ @mcl_fp_addNF5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, lr}
+ ldm r1, {r12, lr}
+ ldr r9, [r1, #8]
+ ldr r5, [r1, #12]
+ ldr r8, [r1, #16]
+ ldm r2, {r6, r7}
+ ldr r1, [r2, #8]
+ ldr r4, [r2, #12]
+ ldr r2, [r2, #16]
+ adds r6, r6, r12
+ adcs r10, r7, lr
+ adcs r9, r1, r9
+ adcs lr, r4, r5
+ ldr r4, [r3]
+ adc r12, r2, r8
+ ldmib r3, {r2, r5}
+ ldr r1, [r3, #12]
+ ldr r3, [r3, #16]
+ subs r4, r6, r4
+ sbcs r2, r10, r2
+ sbcs r5, r9, r5
+ sbcs r1, lr, r1
+ sbc r3, r12, r3
+ asr r7, r3, #31
+ cmp r7, #0
+ movlt r4, r6
+ movlt r2, r10
+ movlt r5, r9
+ cmp r7, #0
+ movlt r1, lr
+ movlt r3, r12
+ str r4, [r0]
+ str r2, [r0, #4]
+ str r5, [r0, #8]
+ str r1, [r0, #12]
+ str r3, [r0, #16]
+ pop {r4, r5, r6, r7, r8, r9, r10, lr}
+ mov pc, lr
+.Lfunc_end75:
+ .size mcl_fp_addNF5L, .Lfunc_end75-mcl_fp_addNF5L
+ .cantunwind
+ .fnend
+
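+@ mcl_fp_sub5L: modular subtraction; computes a-b and returns early when
+@ there is no borrow, otherwise adds the modulus at r3 back in.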
+ .globl mcl_fp_sub5L
+ .align 2
+ .type mcl_fp_sub5L,%function
+mcl_fp_sub5L: @ @mcl_fp_sub5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldm r2, {r8, r12, lr}
+ ldr r9, [r2, #12]
+ ldr r6, [r2, #16]
+ ldm r1, {r2, r7}
+ ldr r4, [r1, #8]
+ ldr r5, [r1, #12]
+ ldr r1, [r1, #16]
+ subs r8, r2, r8
+ sbcs r2, r7, r12
+ str r8, [r0]
+ sbcs r12, r4, lr
+ sbcs lr, r5, r9
+ sbcs r4, r1, r6
+ mov r1, #0
+ stmib r0, {r2, r12, lr}
+ sbc r1, r1, #0
+ str r4, [r0, #16]
+ tst r1, #1
+ popeq {r4, r5, r6, r7, r8, r9, r11, lr}
+ moveq pc, lr
+ ldm r3, {r1, r5, r6, r7}
+ ldr r3, [r3, #16]
+ adds r1, r1, r8
+ adcs r2, r5, r2
+ adcs r6, r6, r12
+ adcs r7, r7, lr
+ adc r3, r3, r4
+ stm r0, {r1, r2, r6, r7}
+ str r3, [r0, #16]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end76:
+ .size mcl_fp_sub5L, .Lfunc_end76-mcl_fp_sub5L
+ .cantunwind
+ .fnend
+
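+@ mcl_fp_subNF5L: modular subtraction, NF variant; computes both a-b and
+@ a-b+p and selects between them by the sign of the difference (movge).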
+ .globl mcl_fp_subNF5L
+ .align 2
+ .type mcl_fp_subNF5L,%function
+mcl_fp_subNF5L: @ @mcl_fp_subNF5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldm r2, {r12, lr}
+ ldr r9, [r2, #8]
+ ldr r5, [r2, #12]
+ ldr r8, [r2, #16]
+ ldm r1, {r6, r7}
+ ldr r2, [r1, #8]
+ ldr r4, [r1, #12]
+ ldr r1, [r1, #16]
+ subs r11, r6, r12
+ sbcs r10, r7, lr
+ sbcs lr, r2, r9
+ add r9, r3, #8
+ sbcs r12, r4, r5
+ ldm r3, {r4, r5}
+ sbc r1, r1, r8
+ ldm r9, {r2, r8, r9}
+ asr r6, r1, #31
+ adds r4, r11, r4
+ adcs r5, r10, r5
+ adcs r2, lr, r2
+ adcs r3, r12, r8
+ adc r7, r1, r9
+ cmp r6, #0
+ movge r4, r11
+ movge r5, r10
+ movge r2, lr
+ cmp r6, #0
+ movge r3, r12
+ movge r7, r1
+ str r4, [r0]
+ str r5, [r0, #4]
+ str r2, [r0, #8]
+ str r3, [r0, #12]
+ str r7, [r0, #16]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end77:
+ .size mcl_fp_subNF5L, .Lfunc_end77-mcl_fp_subNF5L
+ .cantunwind
+ .fnend
+
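+@ mcl_fpDbl_add5L: addition of two 10-limb (double-width) values; the low
+@ five limbs are stored unreduced and the high five limbs are reduced
+@ modulo the 5-limb modulus at r3.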
+ .globl mcl_fpDbl_add5L
+ .align 2
+ .type mcl_fpDbl_add5L,%function
+mcl_fpDbl_add5L: @ @mcl_fpDbl_add5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #20
+ sub sp, sp, #20
+ ldr r12, [r1]
+ ldr r9, [r1, #4]
+ ldr r8, [r1, #8]
+ ldr r10, [r1, #12]
+ ldmib r2, {r6, r7}
+ ldr r5, [r2, #16]
+ ldr r11, [r2]
+ ldr r4, [r2, #12]
+ str r5, [sp] @ 4-byte Spill
+ ldr r5, [r2, #20]
+ adds lr, r11, r12
+ ldr r11, [r2, #32]
+ add r12, r1, #16
+ adcs r6, r6, r9
+ add r9, r1, #28
+ adcs r7, r7, r8
+ str r5, [sp, #4] @ 4-byte Spill
+ ldr r5, [r2, #24]
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [r2, #28]
+ ldr r2, [r2, #36]
+ str r5, [sp, #16] @ 4-byte Spill
+ str r2, [sp, #8] @ 4-byte Spill
+ adcs r5, r4, r10
+ ldm r9, {r4, r8, r9}
+ ldm r12, {r1, r2, r12}
+ str lr, [r0]
+ stmib r0, {r6, r7}
+ ldr r7, [sp] @ 4-byte Reload
+ str r5, [r0, #12]
+ adcs r1, r7, r1
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r2, r7, r2
+ mov r7, #0
+ adcs r12, r1, r12
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r10, r1, r4
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r8, r11, r8
+ adcs lr, r1, r9
+ adc r1, r7, #0
+ ldr r7, [r3]
+ ldmib r3, {r4, r5, r6}
+ ldr r3, [r3, #16]
+ subs r7, r2, r7
+ sbcs r4, r12, r4
+ sbcs r5, r10, r5
+ sbcs r6, r8, r6
+ sbcs r3, lr, r3
+ sbc r1, r1, #0
+ ands r1, r1, #1
+ movne r7, r2
+ movne r4, r12
+ movne r5, r10
+ cmp r1, #0
+ movne r6, r8
+ movne r3, lr
+ str r7, [r0, #20]
+ str r4, [r0, #24]
+ str r5, [r0, #28]
+ str r6, [r0, #32]
+ str r3, [r0, #36]
+ add sp, sp, #20
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end78:
+ .size mcl_fpDbl_add5L, .Lfunc_end78-mcl_fpDbl_add5L
+ .cantunwind
+ .fnend
+
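+@ mcl_fpDbl_sub5L: subtraction of two 10-limb values; the low five limbs
+@ are stored unreduced and the high five limbs are corrected by adding
+@ the modulus at r3 back when the subtraction borrowed.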
+ .globl mcl_fpDbl_sub5L
+ .align 2
+ .type mcl_fpDbl_sub5L,%function
+mcl_fpDbl_sub5L: @ @mcl_fpDbl_sub5L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #32
+ sub sp, sp, #32
+ ldr r7, [r2, #32]
+ add r8, r1, #12
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r1, #36]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldmib r2, {r9, r10, r11}
+ ldr r7, [r2, #16]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ ldr r2, [r2]
+ str r7, [sp, #12] @ 4-byte Spill
+ ldm r8, {r4, r5, r6, r7, r8}
+ ldm r1, {r1, r12, lr}
+ subs r1, r1, r2
+ sbcs r2, r12, r9
+ stm r0, {r1, r2}
+ sbcs r1, lr, r10
+ str r1, [r0, #8]
+ sbcs r1, r4, r11
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #12]
+ ldr r1, [sp] @ 4-byte Reload
+ sbcs r1, r5, r1
+ ldr r5, [sp, #16] @ 4-byte Reload
+ sbcs r2, r6, r2
+ ldr r6, [sp, #8] @ 4-byte Reload
+ str r1, [r0, #16]
+ mov r1, #0
+ sbcs r7, r7, r6
+ ldr r6, [sp, #12] @ 4-byte Reload
+ sbcs r9, r8, r6
+ ldr r6, [sp, #24] @ 4-byte Reload
+ sbcs r8, r5, r6
+ ldr r6, [sp, #28] @ 4-byte Reload
+ ldr r5, [sp, #20] @ 4-byte Reload
+ sbcs lr, r5, r6
+ sbc r12, r1, #0
+ ldm r3, {r1, r4, r5, r6}
+ ldr r3, [r3, #16]
+ adds r1, r2, r1
+ adcs r4, r7, r4
+ adcs r5, r9, r5
+ adcs r6, r8, r6
+ adc r3, lr, r3
+ ands r12, r12, #1
+ moveq r1, r2
+ moveq r4, r7
+ moveq r5, r9
+ cmp r12, #0
+ moveq r6, r8
+ moveq r3, lr
+ str r1, [r0, #20]
+ str r4, [r0, #24]
+ str r5, [r0, #28]
+ str r6, [r0, #32]
+ str r3, [r0, #36]
+ add sp, sp, #32
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end79:
+ .size mcl_fpDbl_sub5L, .Lfunc_end79-mcl_fpDbl_sub5L
+ .cantunwind
+ .fnend
+
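+@ mcl_fp_mulUnitPre6L: multiplies the 6-limb value at r1 by the single
+@ 32-bit word in r2 and stores the 7-limb product at r0.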
+ .globl mcl_fp_mulUnitPre6L
+ .align 2
+ .type mcl_fp_mulUnitPre6L,%function
+mcl_fp_mulUnitPre6L: @ @mcl_fp_mulUnitPre6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r11, [r1, #12]
+ ldr r9, [r1, #16]
+ ldr r8, [r1, #20]
+ umull r4, r10, lr, r2
+ umull r1, r7, r12, r2
+ mov r5, r7
+ mov r6, r4
+ umlal r5, r6, r3, r2
+ stm r0, {r1, r5, r6}
+ umull r5, r6, r3, r2
+ umull r1, r12, r11, r2
+ adds r3, r7, r5
+ adcs r3, r6, r4
+ adcs r1, r10, r1
+ str r1, [r0, #12]
+ umull r1, r3, r9, r2
+ adcs r1, r12, r1
+ str r1, [r0, #16]
+ umull r1, r7, r8, r2
+ adcs r1, r3, r1
+ str r1, [r0, #20]
+ adc r1, r7, #0
+ str r1, [r0, #24]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end80:
+ .size mcl_fp_mulUnitPre6L, .Lfunc_end80-mcl_fp_mulUnitPre6L
+ .cantunwind
+ .fnend
+
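+@ mcl_fpDbl_mulPre6L: full 6x6-limb multiplication (schoolbook, one word
+@ of r2 per pass) producing the 12-limb double-width product at r0,
+@ with no reduction.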
+ .globl mcl_fpDbl_mulPre6L
+ .align 2
+ .type mcl_fpDbl_mulPre6L,%function
+mcl_fpDbl_mulPre6L: @ @mcl_fpDbl_mulPre6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #48
+ sub sp, sp, #48
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r3, [r2]
+ ldm r1, {r12, lr}
+ ldr r2, [r1, #8]
+ mov r8, r0
+ ldr r10, [r1, #12]
+ umull r0, r4, r12, r3
+ umull r6, r7, lr, r3
+ str r2, [sp, #24] @ 4-byte Spill
+ adds r6, r4, r6
+ str r0, [sp, #32] @ 4-byte Spill
+ umull r5, r6, r2, r3
+ adcs r7, r7, r5
+ umlal r4, r5, lr, r3
+ umull r7, r11, r10, r3
+ adcs r0, r6, r7
+ ldr r7, [r1, #16]
+ str r0, [sp, #40] @ 4-byte Spill
+ umull r6, r0, r7, r3
+ adcs r2, r11, r6
+ ldr r6, [r1, #20]
+ str r2, [sp, #36] @ 4-byte Spill
+ umull r11, r2, r6, r3
+ adcs r0, r0, r11
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r2, r2, #0
+ str r2, [sp, #12] @ 4-byte Spill
+ str r0, [r8]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r3, [r0, #4]
+ umull r11, r9, r12, r3
+ adds r2, r11, r4
+ umull r4, r11, lr, r3
+ str r9, [sp, #28] @ 4-byte Spill
+ adcs lr, r4, r5
+ ldr r5, [sp, #24] @ 4-byte Reload
+ str r2, [sp, #32] @ 4-byte Spill
+ umull r4, r2, r10, r3
+ str r2, [sp, #20] @ 4-byte Spill
+ umull r2, r10, r5, r3
+ ldr r5, [sp, #40] @ 4-byte Reload
+ adcs r2, r2, r5
+ ldr r5, [sp, #36] @ 4-byte Reload
+ adcs r4, r4, r5
+ umull r5, r9, r7, r3
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adcs r5, r5, r7
+ umull r7, r12, r6, r3
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adcs r7, r7, r3
+ mov r3, #0
+ adc r6, r3, #0
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adds r3, lr, r3
+ adcs r2, r2, r11
+ adcs lr, r4, r10
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs r10, r5, r4
+ ldr r4, [r1, #8]
+ adcs r11, r7, r9
+ ldr r9, [r1, #4]
+ adc r7, r6, r12
+ ldr r6, [r0, #8]
+ ldr r0, [r1]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r9, [sp, #8] @ 4-byte Spill
+ umull r12, r5, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ adds r0, r12, r3
+ str r7, [r8, #4]
+ ldr r7, [r1, #12]
+ ldr r12, [r1, #20]
+ str r5, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #32] @ 4-byte Spill
+ umull r3, r0, r9, r6
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r3, r2
+ str r0, [sp, #12] @ 4-byte Spill
+ umull r3, r0, r4, r6
+ str r0, [sp, #20] @ 4-byte Spill
+ adcs r0, r3, lr
+ ldr lr, [r1, #16]
+ ldr r9, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #4] @ 4-byte Spill
+ umull r2, r0, r7, r6
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, r10
+ umull r10, r5, lr, r6
+ adcs r10, r10, r11
+ umull r11, r3, r12, r6
+ adcs r6, r11, r0
+ mov r0, #0
+ adc r11, r0, #0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adds r0, r9, r0
+ ldr r9, [sp, #4] @ 4-byte Reload
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r9, r2, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r10, r10, r0
+ adcs r0, r6, r5
+ ldr r5, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r11, r3
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ str r0, [r8, #8]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r6, [r0, #12]
+ umull r11, r3, r7, r6
+ str r3, [sp, #36] @ 4-byte Spill
+ umull r7, r3, r4, r6
+ str r3, [sp, #32] @ 4-byte Spill
+ umull r4, r3, r5, r6
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [sp, #40] @ 4-byte Reload
+ umull r5, r2, r3, r6
+ ldr r3, [sp] @ 4-byte Reload
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adds r3, r5, r3
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adcs r4, r4, r3
+ ldr r3, [sp, #24] @ 4-byte Reload
+ adcs r7, r7, r9
+ adcs r9, r11, r10
+ umull r5, r11, lr, r6
+ adcs r3, r5, r3
+ umull r5, r10, r12, r6
+ mov r6, #0
+ adcs r2, r5, r2
+ adc r5, r6, #0
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adds r12, r4, r6
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs lr, r7, r4
+ ldr r4, [sp, #32] @ 4-byte Reload
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r9, r9, r4
+ adcs r3, r3, r7
+ adcs r2, r2, r11
+ str r3, [sp, #20] @ 4-byte Spill
+ str r2, [sp, #28] @ 4-byte Spill
+ adc r2, r5, r10
+ ldr r5, [r0, #16]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r2, [r8, #12]
+ ldr r2, [r1]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldmib r1, {r0, r6}
+ umull r7, r4, r2, r5
+ ldr r3, [r1, #12]
+ adds r2, r7, r12
+ str r4, [sp, #24] @ 4-byte Spill
+ str r2, [sp, #32] @ 4-byte Spill
+ umull r7, r2, r0, r5
+ str r2, [sp, #16] @ 4-byte Spill
+ adcs r2, r7, lr
+ str r2, [sp, #4] @ 4-byte Spill
+ umull r4, r2, r6, r5
+ str r2, [sp, #12] @ 4-byte Spill
+ adcs r2, r4, r9
+ ldr r4, [sp, #28] @ 4-byte Reload
+ ldr r9, [sp, #4] @ 4-byte Reload
+ str r2, [sp] @ 4-byte Spill
+ umull r7, r2, r3, r5
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r7, r7, r2
+ ldr r2, [r1, #16]
+ ldr r1, [r1, #20]
+ umull r10, lr, r2, r5
+ umull r11, r12, r1, r5
+ adcs r10, r10, r4
+ ldr r4, [sp, #36] @ 4-byte Reload
+ adcs r5, r11, r4
+ mov r4, #0
+ adc r11, r4, #0
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adds r4, r9, r4
+ ldr r9, [sp] @ 4-byte Reload
+ str r4, [sp, #4] @ 4-byte Spill
+ ldr r4, [sp, #16] @ 4-byte Reload
+ adcs r4, r9, r4
+ str r4, [sp, #24] @ 4-byte Spill
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adcs r4, r7, r4
+ str r4, [sp, #20] @ 4-byte Spill
+ ldr r4, [sp, #8] @ 4-byte Reload
+ adcs r10, r10, r4
+ adcs lr, r5, lr
+ ldr r5, [sp, #44] @ 4-byte Reload
+ adc r7, r11, r12
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [sp, #32] @ 4-byte Reload
+ ldr r5, [r5, #20]
+ str r7, [r8, #16]
+ umull r11, r7, r3, r5
+ str r7, [sp, #44] @ 4-byte Spill
+ umull r3, r7, r6, r5
+ umull r6, r12, r0, r5
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #40] @ 4-byte Reload
+ umull r4, r0, r7, r5
+ ldr r7, [sp, #4] @ 4-byte Reload
+ adds r9, r4, r7
+ ldr r4, [sp, #24] @ 4-byte Reload
+ str r9, [r8, #20]
+ adcs r6, r6, r4
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs r3, r3, r4
+ adcs r7, r11, r10
+ umull r4, r10, r2, r5
+ adcs r2, r4, lr
+ umull r4, lr, r1, r5
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r4, r1
+ mov r4, #0
+ adc r4, r4, #0
+ adds r5, r6, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r3, r3, r12
+ str r5, [r8, #24]
+ str r3, [r8, #28]
+ adcs r3, r7, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ str r3, [r8, #32]
+ adcs r2, r2, r0
+ adcs r1, r1, r10
+ str r2, [r8, #36]
+ str r1, [r8, #40]
+ adc r1, r4, lr
+ str r1, [r8, #44]
+ add sp, sp, #48
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end81:
+ .size mcl_fpDbl_mulPre6L, .Lfunc_end81-mcl_fpDbl_mulPre6L
+ .cantunwind
+ .fnend
+
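+@ mcl_fpDbl_sqrPre6L: squaring of the 6-limb value at r1, producing the
+@ 12-limb double-width result at r0, with no reduction.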
+ .globl mcl_fpDbl_sqrPre6L
+ .align 2
+ .type mcl_fpDbl_sqrPre6L,%function
+mcl_fpDbl_sqrPre6L: @ @mcl_fpDbl_sqrPre6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #56
+ sub sp, sp, #56
+ ldm r1, {r2, r3}
+ ldr r7, [r1, #12]
+ mov lr, r0
+ ldr r0, [r1, #8]
+ ldr r9, [r1, #16]
+ ldr r12, [r1, #20]
+ umull r10, r6, r7, r2
+ str r0, [sp, #48] @ 4-byte Spill
+ umull r4, r8, r0, r2
+ umull r5, r0, r2, r2
+ str r7, [sp, #44] @ 4-byte Spill
+ str r6, [sp, #36] @ 4-byte Spill
+ umull r6, r7, r3, r2
+ str r5, [sp, #24] @ 4-byte Spill
+ adds r11, r0, r6
+ ldr r5, [sp, #36] @ 4-byte Reload
+ str r7, [sp, #52] @ 4-byte Spill
+ adcs r7, r7, r4
+ umlal r0, r4, r3, r2
+ adcs r7, r8, r10
+ str r7, [sp, #40] @ 4-byte Spill
+ umull r7, r10, r9, r2
+ adcs r7, r5, r7
+ str r7, [sp, #32] @ 4-byte Spill
+ umull r7, r8, r12, r2
+ adcs r11, r10, r7
+ adc r2, r8, #0
+ adds r0, r6, r0
+ umull r6, r10, r3, r3
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r4, r6, r4
+ str r0, [lr]
+ umull r6, r0, r12, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ umull r5, r0, r9, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ umull r9, r12, r0, r3
+ ldr r0, [sp, #48] @ 4-byte Reload
+ umull r7, r8, r0, r3
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r3, r7, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r9, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r5, r5, r11
+ adcs r6, r6, r2
+ mov r2, #0
+ adc r2, r2, #0
+ adds r4, r4, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r11, r3, r10
+ adcs r8, r7, r8
+ ldr r7, [r1, #4]
+ adcs r10, r5, r12
+ ldr r5, [r1, #12]
+ str r0, [lr, #4]
+ ldr r0, [sp, #24] @ 4-byte Reload
+ str r7, [sp, #16] @ 4-byte Spill
+ adcs r0, r6, r0
+ ldr r6, [r1, #8]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r2, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1]
+ umull r3, r2, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ adds r0, r3, r4
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #16]
+ str r0, [sp, #52] @ 4-byte Spill
+ umull r3, r0, r7, r6
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r3, r11
+ ldr r3, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r4, r0, r6, r6
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r4, r8
+ umull r12, r4, r5, r6
+ str r0, [sp, #20] @ 4-byte Spill
+ adcs r0, r12, r10
+ ldr r10, [sp, #24] @ 4-byte Reload
+ str r4, [sp, #40] @ 4-byte Spill
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r9, r0, r2, r6
+ ldr r7, [sp, #20] @ 4-byte Reload
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r9, r9, r0
+ ldr r0, [r1, #20]
+ umull r11, r8, r0, r6
+ adcs r6, r11, r3
+ mov r3, #0
+ adc r11, r3, #0
+ ldr r3, [sp, #36] @ 4-byte Reload
+ adds r3, r10, r3
+ str r3, [sp, #24] @ 4-byte Spill
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r3, r7, r3
+ ldr r7, [sp, #8] @ 4-byte Reload
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r3, r7, r3
+ str r3, [sp, #28] @ 4-byte Spill
+ adcs r3, r9, r4
+ ldr r4, [sp, #16] @ 4-byte Reload
+ ldr r9, [sp, #48] @ 4-byte Reload
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [sp] @ 4-byte Reload
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r3, r6, r3
+ str r3, [sp, #12] @ 4-byte Spill
+ umull r6, r3, r0, r5
+ adc r11, r11, r8
+ str r3, [sp, #44] @ 4-byte Spill
+ umull r3, r0, r2, r5
+ str r0, [sp, #36] @ 4-byte Spill
+ umull r2, r0, r5, r5
+ str r0, [sp, #32] @ 4-byte Spill
+ umull r0, r10, r4, r5
+ umull r4, r8, r9, r5
+ ldr r5, [sp, #24] @ 4-byte Reload
+ adds r4, r4, r5
+ ldr r5, [sp, #4] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #28] @ 4-byte Reload
+ adcs r5, r12, r5
+ adcs r2, r2, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r3, r3, r7
+ mov r7, #0
+ adcs r6, r6, r11
+ adc r7, r7, #0
+ adds r9, r0, r8
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r11, r5, r10
+ adcs r0, r2, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ add r3, r1, #8
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r12, r6, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r0, [lr, #8]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ str r4, [lr, #12]
+ adc r0, r7, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r1, {r4, r6}
+ ldm r3, {r0, r2, r3}
+ ldr r1, [r1, #20]
+ umull r5, r7, r2, r1
+ str r5, [sp, #32] @ 4-byte Spill
+ str r7, [sp, #52] @ 4-byte Spill
+ umull r5, r7, r0, r1
+ str r5, [sp, #28] @ 4-byte Spill
+ str r7, [sp, #48] @ 4-byte Spill
+ umull r5, r7, r6, r1
+ str r5, [sp, #24] @ 4-byte Spill
+ str r7, [sp, #44] @ 4-byte Spill
+ umull r5, r7, r4, r1
+ str r5, [sp, #8] @ 4-byte Spill
+ str r7, [sp, #36] @ 4-byte Spill
+ umull r7, r5, r2, r3
+ str r5, [sp, #4] @ 4-byte Spill
+ umull r2, r5, r0, r3
+ umull r0, r10, r6, r3
+ umull r6, r8, r4, r3
+ adds r4, r6, r9
+ str r5, [sp] @ 4-byte Spill
+ adcs r11, r0, r11
+ ldr r0, [sp, #20] @ 4-byte Reload
+ str r4, [sp, #40] @ 4-byte Spill
+ umull r4, r9, r3, r3
+ adcs r5, r2, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r6, r7, r0
+ umull r0, r2, r1, r3
+ ldr r3, [sp, #12] @ 4-byte Reload
+ mov r7, #0
+ adcs r12, r4, r12
+ ldr r4, [sp] @ 4-byte Reload
+ adcs r3, r0, r3
+ adc r7, r7, #0
+ adds r8, r11, r8
+ adcs r5, r5, r10
+ adcs r6, r6, r4
+ ldr r4, [sp, #4] @ 4-byte Reload
+ adcs r4, r12, r4
+ adcs r3, r3, r9
+ adc r10, r7, r2
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adds r12, r7, r8
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adcs r9, r7, r5
+ ldr r5, [sp, #28] @ 4-byte Reload
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r6, r5, r6
+ ldr r5, [sp, #32] @ 4-byte Reload
+ adcs r4, r5, r4
+ adcs r0, r0, r3
+ umull r3, r8, r1, r1
+ adcs r1, r3, r10
+ mov r3, #0
+ adc r3, r3, #0
+ adds r5, r9, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ adcs r6, r6, r7
+ ldr r7, [sp, #48] @ 4-byte Reload
+ adcs r4, r4, r7
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ adcs r1, r1, r2
+ adc r2, r3, r8
+ ldr r3, [sp, #40] @ 4-byte Reload
+ str r3, [lr, #16]
+ add r3, lr, #36
+ str r12, [lr, #20]
+ str r5, [lr, #24]
+ str r6, [lr, #28]
+ str r4, [lr, #32]
+ stm r3, {r0, r1, r2}
+ add sp, sp, #56
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end82:
+ .size mcl_fpDbl_sqrPre6L, .Lfunc_end82-mcl_fpDbl_sqrPre6L
+ .cantunwind
+ .fnend
+
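+@ mcl_fp_mont6L: 6-limb Montgomery multiplication; each pass multiplies by
+@ one word of r2 and folds in a reduction step using the constant at
+@ r3[-4], ending with a carry-based conditional subtraction of p.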
+ .globl mcl_fp_mont6L
+ .align 2
+ .type mcl_fp_mont6L,%function
+mcl_fp_mont6L: @ @mcl_fp_mont6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #116
+ sub sp, sp, #116
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, r2
+ str r2, [sp, #60] @ 4-byte Spill
+ ldm r0, {r2, r6, r7}
+ ldr r0, [r0, #12]
+ ldr r5, [r3, #8]
+ ldr r9, [r3]
+ ldr r11, [r1, #8]
+ ldr lr, [r1, #12]
+ ldr r12, [r3, #4]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r1, #4]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1]
+ str r5, [sp, #92] @ 4-byte Spill
+ str r9, [sp, #84] @ 4-byte Spill
+ str r11, [sp, #100] @ 4-byte Spill
+ str lr, [sp, #64] @ 4-byte Spill
+ str r12, [sp, #112] @ 4-byte Spill
+ str r7, [sp, #108] @ 4-byte Spill
+ ldr r7, [r3, #-4]
+ umull r4, r8, r0, r2
+ str r0, [sp, #88] @ 4-byte Spill
+ str r4, [sp, #44] @ 4-byte Spill
+ mul r0, r4, r7
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r1, #20]
+ ldr r1, [r1, #16]
+ umull r10, r4, r0, r5
+ str r4, [sp, #36] @ 4-byte Spill
+ umull r4, r5, r0, r9
+ str r10, [sp, #16] @ 4-byte Spill
+ mov r9, r5
+ str r5, [sp, #12] @ 4-byte Spill
+ str r4, [sp, #40] @ 4-byte Spill
+ umull r5, r4, r7, r2
+ str r7, [sp, #104] @ 4-byte Spill
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #96] @ 4-byte Spill
+ umlal r9, r10, r0, r12
+ str r5, [sp, #72] @ 4-byte Spill
+ str r4, [sp, #76] @ 4-byte Spill
+ umull r5, r4, r1, r2
+ str r4, [sp, #68] @ 4-byte Spill
+ umull r1, r4, lr, r2
+ str r5, [sp, #28] @ 4-byte Spill
+ umull lr, r5, r11, r2
+ str r4, [sp, #24] @ 4-byte Spill
+ umull r11, r4, r7, r2
+ adds r7, r8, r11
+ adcs r4, r4, lr
+ ldr r7, [r3, #12]
+ adcs r1, r5, r1
+ ldr r4, [sp, #24] @ 4-byte Reload
+ ldr r5, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r4, r1
+ ldr r4, [sp, #68] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r7, [sp, #72] @ 4-byte Spill
+ adcs r1, r4, r1
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ umull r11, r4, r0, r1
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ str r4, [sp, #8] @ 4-byte Spill
+ umull r3, r4, r0, r12
+ adds r3, r5, r3
+ str r1, [sp, #68] @ 4-byte Spill
+ umull r5, r12, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r4, r4, r1
+ umull r4, r3, r0, r7
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp, #40] @ 4-byte Reload
+ adcs r1, r0, r4
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r3, r3, r5
+ adcs r4, r12, r11
+ mov r12, #0
+ adc r5, r0, #0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ umlal r8, lr, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adds r2, r7, r2
+ adcs r2, r9, r8
+ str r2, [sp, #44] @ 4-byte Spill
+ adcs r2, r10, lr
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r3, r1
+ mov r3, r0
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adcs r1, r4, r1
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r1, r5, r1
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adc r11, r12, #0
+ umull lr, r10, r6, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ umull r7, r4, r6, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ umull r5, r12, r6, r1
+ umull r1, r8, r6, r0
+ umull r9, r0, r6, r2
+ adds r1, r0, r1
+ adcs r1, r8, r5
+ ldr r8, [sp, #64] @ 4-byte Reload
+ umlal r0, r5, r6, r3
+ ldr r3, [sp, #44] @ 4-byte Reload
+ umull r1, r2, r6, r8
+ adcs r1, r12, r1
+ adcs r2, r2, r7
+ adcs r12, r4, lr
+ adc r4, r10, #0
+ adds r7, r3, r9
+ ldr r3, [sp, #40] @ 4-byte Reload
+ ldr r10, [sp, #68] @ 4-byte Reload
+ adcs r9, r3, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r11, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ mul r0, r7, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ umull lr, r3, r0, r5
+ umull r6, r12, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ umull r11, r2, r0, r1
+ mov r1, r6
+ mov r4, r2
+ adds r2, r2, lr
+ umlal r4, r1, r0, r5
+ ldr r5, [sp, #76] @ 4-byte Reload
+ adcs r3, r3, r6
+ umull r2, lr, r0, r5
+ ldr r5, [sp, #72] @ 4-byte Reload
+ umull r3, r6, r0, r5
+ adcs r12, r12, r3
+ umull r5, r3, r0, r10
+ adcs r0, r6, r5
+ adcs r2, r3, r2
+ adc r3, lr, #0
+ adds r7, r11, r7
+ adcs r7, r4, r9
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r7
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r12, r1
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ umull r4, r5, r2, r8
+ ldr r8, [sp, #88] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ umull r3, r1, r2, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r3, [sp, #8] @ 4-byte Spill
+ mov r3, r2
+ str r1, [sp, #16] @ 4-byte Spill
+ umull r6, r9, r2, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ umull r1, lr, r2, r0
+ umull r11, r0, r3, r8
+ umull r2, r12, r3, r7
+ adds r2, r0, r2
+ str r11, [sp, #12] @ 4-byte Spill
+ adcs r2, r12, r1
+ umlal r0, r1, r3, r7
+ ldr r3, [sp, #20] @ 4-byte Reload
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r2, lr, r4
+ adcs r4, r5, r6
+ ldr r6, [sp, #8] @ 4-byte Reload
+ ldr r5, [sp, #16] @ 4-byte Reload
+ adcs r6, r9, r6
+ adc r5, r5, #0
+ adds r8, r3, r7
+ ldr r3, [sp, #44] @ 4-byte Reload
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ mul r0, r8, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ umull r2, r3, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r3, [sp, #16] @ 4-byte Spill
+ umull r3, r5, r0, r1
+ mov r1, r2
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [sp, #76] @ 4-byte Reload
+ mov r4, r5
+ umlal r4, r1, r0, r7
+ umull r9, r6, r0, r3
+ ldr r3, [sp, #72] @ 4-byte Reload
+ str r6, [sp, #12] @ 4-byte Spill
+ umull r6, lr, r0, r10
+ umull r12, r10, r0, r3
+ umull r11, r3, r0, r7
+ adds r0, r5, r11
+ adcs r0, r3, r2
+ ldr r3, [sp, #52] @ 4-byte Reload
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r11, r0, r12
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r10, r10, r6
+ adcs lr, lr, r9
+ adc r9, r0, #0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adds r6, r0, r8
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r8, [sp, #88] @ 4-byte Reload
+ umull r7, r2, r3, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #20] @ 4-byte Spill
+ umull r7, r2, r3, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r2, [sp, #8] @ 4-byte Spill
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [sp, #108] @ 4-byte Reload
+ umull r5, r2, r3, r0
+ str r2, [sp] @ 4-byte Spill
+ umull r2, r0, r3, r8
+ umull r6, r12, r3, r7
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r4, r4, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r11, r11, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r10, r10, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, lr, r1
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r9, r1
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adc lr, r1, #0
+ adds r6, r0, r6
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r2, r12, r5
+ umlal r0, r5, r3, r7
+ ldr r2, [sp] @ 4-byte Reload
+ adcs r9, r2, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ umull r6, r2, r3, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r2, r2, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adc r8, r1, #0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adds r4, r4, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ adcs r0, r11, r5
+ ldr r5, [sp, #112] @ 4-byte Reload
+ ldr r11, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r10, r9
+ ldr r10, [sp, #80] @ 4-byte Reload
+ ldr r9, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, lr, r8
+ ldr r8, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ mul r0, r4, r10
+ umull r2, r12, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ umull r3, r7, r0, r1
+ mov r1, r2
+ str r3, [sp, #24] @ 4-byte Spill
+ umull lr, r3, r0, r5
+ mov r6, r7
+ adds r7, r7, lr
+ umlal r6, r1, r0, r5
+ adcs r2, r3, r2
+ umull r7, lr, r0, r11
+ umull r2, r3, r0, r9
+ adcs r12, r12, r2
+ umull r5, r2, r0, r8
+ adcs r0, r3, r5
+ adcs r2, r2, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adc r3, lr, #0
+ adds r7, r7, r4
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r7, r6, r7
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r12, r1
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ ldr r3, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r4, [r0, #16]
+ ldr r0, [sp, #104] @ 4-byte Reload
+ umull r12, lr, r4, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ umull r5, r6, r4, r3
+ umull r2, r8, r4, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ umull r7, r1, r4, r0
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adds r5, r1, r5
+ umull r0, r5, r4, r7
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r6, r6, r0
+ umlal r1, r0, r4, r3
+ ldr r3, [sp, #52] @ 4-byte Reload
+ adcs r2, r5, r2
+ umull r5, r6, r4, r7
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adcs r7, r8, r5
+ adcs r6, r6, r12
+ adc r5, lr, #0
+ adds r8, r3, r4
+ ldr r3, [sp, #48] @ 4-byte Reload
+ adcs r1, r3, r1
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ mul r0, r8, r10
+ umull r5, r12, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ umull lr, r3, r0, r6
+ umull r10, r2, r0, r1
+ mov r1, r5
+ mov r4, r2
+ adds r2, r2, lr
+ adcs r3, r3, r5
+ umlal r4, r1, r0, r6
+ umull r2, lr, r0, r11
+ ldr r11, [sp, #88] @ 4-byte Reload
+ umull r3, r5, r0, r9
+ adcs r12, r12, r3
+ umull r6, r3, r0, r7
+ adcs r0, r5, r6
+ adcs r2, r3, r2
+ adc r3, lr, #0
+ adds r7, r10, r8
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r7, r4, r7
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r12, r1
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ ldr r3, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r4, [r0, #20]
+ ldr r0, [sp, #104] @ 4-byte Reload
+ umull r9, r1, r4, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ umull r2, r12, r4, r3
+ str r1, [sp, #60] @ 4-byte Spill
+ umull r7, r8, r4, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ umull r5, r6, r4, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ umull r1, lr, r4, r0
+ umull r10, r0, r4, r11
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r2, r0, r2
+ adcs r2, r12, r1
+ umlal r0, r1, r4, r3
+ ldr r3, [sp, #52] @ 4-byte Reload
+ ldr r12, [sp, #112] @ 4-byte Reload
+ adcs r2, lr, r5
+ adcs r5, r6, r7
+ ldr r6, [sp, #60] @ 4-byte Reload
+ adcs r7, r8, r9
+ ldr r9, [sp, #68] @ 4-byte Reload
+ adc r6, r6, #0
+ adds r8, r3, r10
+ ldr r3, [sp, #48] @ 4-byte Reload
+ ldr r10, [sp, #84] @ 4-byte Reload
+ adcs lr, r3, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #88] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ mul r0, r8, r1
+ umull r3, r4, r0, r10
+ umull r1, r2, r0, r12
+ adds r1, r4, r1
+ str r3, [sp, #80] @ 4-byte Spill
+ umull r6, r1, r0, r11
+ adcs r2, r2, r6
+ umlal r4, r6, r0, r12
+ umull r2, r3, r0, r5
+ adcs r1, r1, r2
+ str r1, [sp, #60] @ 4-byte Spill
+ umull r2, r1, r0, r9
+ adcs r2, r3, r2
+ str r2, [sp, #52] @ 4-byte Spill
+ umull r3, r2, r0, r7
+ adcs r1, r1, r3
+ ldr r3, [sp, #60] @ 4-byte Reload
+ adc r0, r2, #0
+ ldr r2, [sp, #80] @ 4-byte Reload
+ adds r2, r2, r8
+ ldr r2, [sp, #108] @ 4-byte Reload
+ adcs r12, r4, lr
+ adcs lr, r6, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ adcs r8, r3, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ ldr r3, [sp, #52] @ 4-byte Reload
+ adcs r6, r3, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ adcs r3, r1, r2
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r2, r0, r1
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r0, r0, #0
+ subs r4, r12, r10
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ sbcs r0, lr, r0
+ sbcs r1, r8, r11
+ mov r11, r6
+ sbcs r5, r6, r5
+ sbcs r6, r3, r9
+ mov r9, r2
+ sbcs r10, r2, r7
+ ldr r2, [sp, #108] @ 4-byte Reload
+ sbc r7, r2, #0
+ ldr r2, [sp, #56] @ 4-byte Reload
+ ands r7, r7, #1
+ movne r4, r12
+ movne r0, lr
+ movne r1, r8
+ cmp r7, #0
+ movne r5, r11
+ movne r6, r3
+ movne r10, r9
+ str r4, [r2]
+ str r0, [r2, #4]
+ str r1, [r2, #8]
+ str r5, [r2, #12]
+ str r6, [r2, #16]
+ str r10, [r2, #20]
+ add sp, sp, #116
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end83:
+ .size mcl_fp_mont6L, .Lfunc_end83-mcl_fp_mont6L
+ .cantunwind
+ .fnend
+
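+@ mcl_fp_montNF6L: 6-limb Montgomery multiplication, "NF" variant of
+@ mcl_fp_mont6L above (presumably the same signed final-select scheme as
+@ mcl_fp_montNF5L).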
+ .globl mcl_fp_montNF6L
+ .align 2
+ .type mcl_fp_montNF6L,%function
+mcl_fp_montNF6L: @ @mcl_fp_montNF6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #88
+ sub sp, sp, #88
+ str r2, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r2, {r4, r12}
+ ldr r5, [r1, #4]
+ ldr r0, [r2, #12]
+ ldr r9, [r2, #8]
+ ldr r2, [r1]
+ ldr r7, [r1, #8]
+ ldr lr, [r3, #8]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #12]
+ str r5, [sp, #44] @ 4-byte Spill
+ umull r6, r8, r5, r4
+ mov r10, r5
+ umull r11, r5, r2, r4
+ str r2, [sp, #52] @ 4-byte Spill
+ str r7, [sp, #48] @ 4-byte Spill
+ str lr, [sp, #40] @ 4-byte Spill
+ adds r6, r5, r6
+ umull r2, r6, r7, r4
+ adcs r7, r8, r2
+ umlal r5, r2, r10, r4
+ umull r7, r8, r0, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r6, r7
+ ldr r6, [r1, #16]
+ str r0, [sp, #64] @ 4-byte Spill
+ umull r7, r0, r6, r4
+ str r6, [sp, #72] @ 4-byte Spill
+ ldr r6, [r3]
+ adcs r7, r8, r7
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r1, #20]
+ str r6, [sp, #80] @ 4-byte Spill
+ umull r1, r8, r7, r4
+ str r7, [sp, #76] @ 4-byte Spill
+ adcs r0, r0, r1
+ ldr r1, [r3, #-4]
+ str r0, [sp, #20] @ 4-byte Spill
+ adc r0, r8, #0
+ ldr r8, [r3, #4]
+ str r0, [sp, #16] @ 4-byte Spill
+ mul r0, r11, r1
+ str r1, [sp, #56] @ 4-byte Spill
+ umull r1, r7, r0, r6
+ str r8, [sp, #68] @ 4-byte Spill
+ adds r1, r1, r11
+ str r7, [sp, #12] @ 4-byte Spill
+ umull r1, r4, r0, r8
+ adcs r8, r1, r5
+ ldr r1, [r3, #12]
+ umull r5, r11, r0, lr
+ str r4, [sp, #8] @ 4-byte Spill
+ adcs r6, r5, r2
+ str r1, [sp, #84] @ 4-byte Spill
+ umull r5, r7, r0, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs lr, r5, r1
+ ldr r1, [r3, #16]
+ str r1, [sp, #64] @ 4-byte Spill
+ umull r5, r4, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r5, r5, r1
+ ldr r1, [r3, #20]
+ umull r3, r2, r0, r1
+ ldr r0, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r3, r0
+ adc r3, r1, #0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r1, r8, r1
+ ldr r8, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r1, r6, r1
+ adcs r11, lr, r11
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr lr, [sp, #76] @ 4-byte Reload
+ adcs r1, r5, r7
+ ldr r5, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r1, [sp, #12] @ 4-byte Spill
+ str r0, [sp, #8] @ 4-byte Spill
+ adc r0, r3, r2
+ umull r3, r6, r12, r10
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ umull r7, r1, r12, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adds r3, r1, r3
+ umull r2, r3, r12, r0
+ adcs r6, r6, r2
+ umlal r1, r2, r12, r10
+ ldr r10, [sp, #68] @ 4-byte Reload
+ umull r6, r0, r12, r8
+ adcs r4, r3, r6
+ umull r6, r3, r12, r5
+ adcs r5, r0, r6
+ umull r6, r0, r12, lr
+ ldr r12, [sp, #60] @ 4-byte Reload
+ adcs r3, r3, r6
+ ldr r6, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r7, r6
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #12] @ 4-byte Reload
+ adcs r2, r2, r11
+ adcs r6, r4, r6
+ ldr r4, [sp, #8] @ 4-byte Reload
+ adcs r11, r5, r4
+ ldr r5, [sp, #4] @ 4-byte Reload
+ adcs r3, r3, r5
+ adc r0, r0, #0
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mul r4, r7, r0
+ umull r0, r5, r4, r3
+ adds r0, r0, r7
+ str r5, [sp, #12] @ 4-byte Spill
+ umull r0, r3, r4, r10
+ ldr r5, [sp, #12] @ 4-byte Reload
+ str r3, [sp, #8] @ 4-byte Spill
+ adcs r3, r0, r1
+ ldr r0, [sp, #40] @ 4-byte Reload
+ umull r1, r7, r4, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r7, [sp, #4] @ 4-byte Spill
+ adcs r1, r1, r2
+ umull r2, r7, r4, r0
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adcs r2, r2, r6
+ umull r6, r0, r4, r7
+ adcs r6, r6, r11
+ umull r7, r11, r4, r12
+ ldr r4, [sp, #20] @ 4-byte Reload
+ ldr r12, [sp, #48] @ 4-byte Reload
+ adcs r4, r7, r4
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adc r7, r7, #0
+ adds r3, r3, r5
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adcs r1, r1, r3
+ ldr r3, [sp, #72] @ 4-byte Reload
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp] @ 4-byte Reload
+ adcs r1, r6, r1
+ adcs r0, r4, r0
+ str r1, [sp, #8] @ 4-byte Spill
+ str r0, [sp, #4] @ 4-byte Spill
+ adc r0, r7, r11
+ ldr r11, [sp, #52] @ 4-byte Reload
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ umull r6, r1, r9, r11
+ umull r5, r4, r9, r0
+ adds r5, r1, r5
+ umull r2, r5, r9, r12
+ adcs r4, r4, r2
+ umlal r1, r2, r9, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ umull r4, r7, r9, r8
+ adcs r8, r5, r4
+ umull r5, r4, r9, r3
+ adcs r5, r7, r5
+ umull r7, r3, r9, lr
+ ldr lr, [sp, #60] @ 4-byte Reload
+ adcs r4, r4, r7
+ adc r3, r3, #0
+ adds r7, r6, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r1, r1, r0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r2, r2, r0
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r6, r8, r0
+ ldr r0, [sp, #4] @ 4-byte Reload
+ ldr r8, [sp, #56] @ 4-byte Reload
+ adcs r9, r5, r0
+ ldr r0, [sp] @ 4-byte Reload
+ adcs r0, r4, r0
+ mul r4, r7, r8
+ str r0, [sp, #20] @ 4-byte Spill
+ adc r0, r3, #0
+ ldr r3, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ umull r0, r5, r4, r3
+ adds r0, r0, r7
+ str r5, [sp, #12] @ 4-byte Spill
+ umull r0, r3, r4, r10
+ ldr r10, [sp, #40] @ 4-byte Reload
+ ldr r5, [sp, #12] @ 4-byte Reload
+ str r3, [sp, #8] @ 4-byte Spill
+ adcs r0, r0, r1
+ umull r1, r3, r4, r10
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ umull r2, r7, r4, r3
+ ldr r3, [sp, #64] @ 4-byte Reload
+ str r7, [sp] @ 4-byte Spill
+ adcs r2, r2, r6
+ umull r6, r7, r4, r3
+ adcs r6, r6, r9
+ umull r3, r9, r4, lr
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs r3, r3, r4
+ ldr r4, [sp, #16] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r0, r0, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #8] @ 4-byte Spill
+ adcs r0, r3, r7
+ str r0, [sp, #4] @ 4-byte Spill
+ adc r0, r4, r9
+ ldr r4, [sp, #44] @ 4-byte Reload
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ umull r3, lr, r0, r12
+ ldr r12, [sp, #36] @ 4-byte Reload
+ umull r9, r2, r0, r11
+ umull r6, r7, r0, r4
+ mov r1, r2
+ adds r2, r2, r6
+ mov r5, r3
+ adcs r2, r7, r3
+ umlal r1, r5, r0, r4
+ umull r2, r3, r0, r12
+ adcs r11, lr, r2
+ ldr lr, [sp, #72] @ 4-byte Reload
+ ldr r2, [sp, #76] @ 4-byte Reload
+ umull r4, r6, r0, lr
+ adcs r3, r3, r4
+ umull r4, r7, r0, r2
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r4, r6, r4
+ adc r6, r7, #0
+ adds r0, r9, r0
+ ldr r9, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ adcs r7, r5, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ ldr r5, [sp, #4] @ 4-byte Reload
+ adcs r2, r11, r2
+ adcs r11, r3, r5
+ ldr r3, [sp] @ 4-byte Reload
+ adcs r3, r4, r3
+ mul r4, r0, r8
+ ldr r8, [sp, #80] @ 4-byte Reload
+ str r3, [sp, #24] @ 4-byte Spill
+ adc r3, r6, #0
+ str r3, [sp, #20] @ 4-byte Spill
+ umull r5, r3, r4, r8
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [sp, #68] @ 4-byte Reload
+ adds r0, r5, r0
+ umull r0, r5, r4, r3
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ umull r1, r3, r4, r10
+ ldr r10, [sp, #60] @ 4-byte Reload
+ str r3, [sp, #8] @ 4-byte Spill
+ adcs r1, r1, r7
+ umull r7, r3, r4, r5
+ adcs r2, r7, r2
+ umull r7, r5, r4, r9
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adcs r7, r7, r11
+ umull r6, r11, r4, r10
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adcs r4, r6, r4
+ ldr r6, [sp, #20] @ 4-byte Reload
+ adc r6, r6, #0
+ adds r0, r0, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ adcs r0, r4, r5
+ str r0, [sp, #8] @ 4-byte Spill
+ adc r0, r6, r11
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r5, [r0, #16]
+ umull r11, r2, r5, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ umull r4, r0, r5, r7
+ adds r4, r2, r4
+ umull r3, r4, r5, r1
+ adcs r0, r0, r3
+ umlal r2, r3, r5, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ umull r0, r6, r5, r12
+ adcs r12, r4, r0
+ umull r4, r1, r5, lr
+ adcs r4, r6, r4
+ umull r6, r0, r5, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r6
+ adc r0, r0, #0
+ adds r6, r11, r7
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r2, r2, r7
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adcs r3, r3, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r5, r12, r7
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r7, r4, r7
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [sp, #4] @ 4-byte Reload
+ adcs r1, r1, r7
+ adc r0, r0, #0
+ str r1, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mul r4, r6, r0
+ umull r0, r1, r4, r8
+ ldr r8, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adds r0, r0, r6
+ ldr r7, [sp, #16] @ 4-byte Reload
+ umull r0, r11, r4, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ umull r2, lr, r4, r8
+ adcs r2, r2, r3
+ umull r3, r12, r4, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r3, r3, r5
+ umull r5, r6, r4, r9
+ adcs r5, r5, r1
+ umull r1, r9, r4, r10
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r4
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r0, r0, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r2, r11
+ adcs r11, r3, lr
+ str r0, [sp, #20] @ 4-byte Spill
+ adcs r10, r5, r12
+ adcs r0, r1, r6
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r4, r9
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r5, [r0, #20]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ umull r6, r1, r5, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r4, r6
+ umull lr, r3, r5, r0
+ umull r12, r0, r5, r7
+ mov r2, r3
+ adds r3, r3, r12
+ umlal r2, r4, r5, r7
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ umull r0, r3, r5, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adcs r12, r1, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ umull r1, r6, r5, r0
+ adcs r1, r3, r1
+ umull r3, r0, r5, r7
+ ldr r5, [sp, #24] @ 4-byte Reload
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r3, r6, r3
+ adc r0, r0, #0
+ adds r6, lr, r5
+ ldr r5, [sp, #16] @ 4-byte Reload
+ ldr lr, [sp, #68] @ 4-byte Reload
+ adcs r2, r2, r7
+ adcs r7, r4, r11
+ adcs r9, r12, r10
+ adcs r1, r1, r5
+ ldr r5, [sp, #80] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r1, r3, r1
+ adc r0, r0, #0
+ str r1, [sp, #76] @ 4-byte Spill
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mul r4, r6, r0
+ umull r0, r1, r4, r5
+ umull r3, r11, r4, lr
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adds r0, r0, r6
+ umull r6, r0, r4, r8
+ adcs r12, r3, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ adcs r10, r6, r7
+ umull r3, r0, r4, r1
+ adcs r9, r3, r9
+ ldr r3, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ umull r7, r0, r4, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r7, r7, r0
+ umull r6, r0, r4, r2
+ ldr r4, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r6, r6, r4
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r12, r12, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r11, r10, r11
+ adcs r9, r9, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r10, r7, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r7, r6, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r4, r0
+ subs r5, r12, r5
+ sbcs r4, r11, lr
+ mov lr, r0
+ sbcs r6, r9, r8
+ sbcs r1, r10, r1
+ sbcs r8, r7, r3
+ sbc r3, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ asr r0, r3, #31
+ cmp r0, #0
+ movlt r5, r12
+ movlt r4, r11
+ movlt r6, r9
+ cmp r0, #0
+ movlt r1, r10
+ movlt r8, r7
+ movlt r3, lr
+ str r5, [r2]
+ str r4, [r2, #4]
+ str r6, [r2, #8]
+ str r1, [r2, #12]
+ str r8, [r2, #16]
+ str r3, [r2, #20]
+ add sp, sp, #88
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end84:
+ .size mcl_fp_montNF6L, .Lfunc_end84-mcl_fp_montNF6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed6L
+ .align 2
+ .type mcl_fp_montRed6L,%function
+mcl_fp_montRed6L: @ @mcl_fp_montRed6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #100
+ sub sp, sp, #100
+ ldr r6, [r1, #4]
+ ldr r10, [r2, #-4]
+ ldr r9, [r1]
+ ldr r3, [r2, #8]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r2]
+ ldr r8, [r2, #4]
+ str r6, [sp, #68] @ 4-byte Spill
+ ldr r6, [r1, #8]
+ mul r4, r9, r10
+ str r3, [sp, #80] @ 4-byte Spill
+ str r0, [sp, #76] @ 4-byte Spill
+ str r10, [sp, #92] @ 4-byte Spill
+ umull r12, r7, r4, r3
+ str r7, [sp, #52] @ 4-byte Spill
+ umull r7, r3, r4, r0
+ mov lr, r12
+ str r7, [sp, #56] @ 4-byte Spill
+ mov r0, r3
+ str r6, [sp, #64] @ 4-byte Spill
+ ldr r6, [r1, #12]
+ umlal r0, lr, r4, r8
+ str r6, [sp, #60] @ 4-byte Spill
+ ldr r6, [r2, #20]
+ umull r5, r7, r4, r6
+ str r6, [sp, #84] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ ldr r2, [r2, #12]
+ str r5, [sp, #44] @ 4-byte Spill
+ str r7, [sp, #48] @ 4-byte Spill
+ umull r5, r7, r4, r6
+ str r6, [sp, #96] @ 4-byte Spill
+ str r2, [sp, #88] @ 4-byte Spill
+ str r7, [sp, #40] @ 4-byte Spill
+ umull r6, r7, r4, r2
+ umull r11, r2, r4, r8
+ adds r3, r3, r11
+ adcs r2, r2, r12
+ ldr r3, [sp, #40] @ 4-byte Reload
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r12, r2, r6
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adcs r11, r7, r5
+ adcs r2, r3, r2
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adc r2, r2, #0
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [sp, #56] @ 4-byte Reload
+ adds r6, r9, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ add r9, r1, #16
+ adcs r0, r2, r0
+ mul r6, r0, r10
+ ldr r10, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ umull r3, r0, r6, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r9, {r2, r4, r7, r9}
+ ldr r5, [sp, #76] @ 4-byte Reload
+ umull r0, r1, r6, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ ldr lr, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ mov r12, r3
+ adcs r2, r2, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, r1
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [sp, #52] @ 4-byte Reload
+ umlal r0, r12, r6, r8
+ adcs r2, r4, r2
+ ldr r4, [sp, #96] @ 4-byte Reload
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r2, r7, r2
+ str r2, [sp, #48] @ 4-byte Spill
+ adcs r2, r9, #0
+ umull r9, r11, r6, lr
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #28] @ 4-byte Spill
+ mov r2, #0
+ adc r2, r2, #0
+ str r2, [sp, #24] @ 4-byte Spill
+ umull r7, r2, r6, r8
+ adds r1, r1, r7
+ adcs r2, r2, r3
+ ldr r3, [sp, #88] @ 4-byte Reload
+ umull r1, r7, r6, r4
+ umull r2, r4, r6, r3
+ ldr r6, [sp, #56] @ 4-byte Reload
+ adcs r2, r6, r2
+ adcs r1, r4, r1
+ ldr r4, [sp, #20] @ 4-byte Reload
+ str r2, [sp, #56] @ 4-byte Spill
+ str r1, [sp, #4] @ 4-byte Spill
+ adcs r1, r7, r9
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adc r7, r11, #0
+ adds r6, r4, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ mul r6, r1, r0
+ umull r9, r0, r6, r10
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r0, r1, r6, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ mov r4, r9
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r12, r0
+ adcs r5, r2, r5
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ mov r0, r1
+ str r5, [sp, #68] @ 4-byte Spill
+ ldr r5, [sp, #52] @ 4-byte Reload
+ umlal r0, r4, r6, r8
+ adcs r2, r2, r5
+ ldr r5, [sp] @ 4-byte Reload
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r2, r5, r2
+ umull r5, r10, r6, lr
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adcs r2, r7, r2
+ umull r7, r12, r6, r8
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adc r2, r2, #0
+ adds r1, r1, r7
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r2, [sp, #36] @ 4-byte Spill
+ umull r7, r2, r6, r3
+ ldr r3, [sp, #8] @ 4-byte Reload
+ umull r11, lr, r6, r1
+ adcs r6, r12, r9
+ adcs r3, r3, r7
+ adcs r12, r2, r11
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adcs r2, lr, r5
+ ldr r5, [sp, #80] @ 4-byte Reload
+ ldr lr, [sp, #76] @ 4-byte Reload
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adc r9, r10, #0
+ adds r6, r3, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ ldr r3, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r6, r0, r3
+ str r0, [sp, #32] @ 4-byte Spill
+ umull r11, r0, r6, r5
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r0, r7, r6, lr
+ mov r10, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ mov r2, r7
+ umlal r2, r10, r6, r8
+ adcs r0, r4, r0
+ ldr r4, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r12, r0
+ ldr r12, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ umull r4, r0, r6, r12
+ str r4, [sp, #12] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ umull r4, r0, r6, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r9, r0, r6, r8
+ adds r7, r7, r9
+ adcs r0, r0, r11
+ ldr r0, [sp, #24] @ 4-byte Reload
+ umull r7, r9, r6, r1
+ ldr r6, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r9, r4
+ ldr r4, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r7, r4, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r4, [sp, #32] @ 4-byte Reload
+ adc r11, r0, #0
+ adds r4, r6, r4
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs r2, r2, r4
+ mul r4, r2, r3
+ str r2, [sp, #36] @ 4-byte Spill
+ umull r9, r2, r4, r5
+ ldr r5, [sp, #68] @ 4-byte Reload
+ str r2, [sp, #28] @ 4-byte Spill
+ umull r3, r2, r4, lr
+ mov r6, r2
+ str r3, [sp, #32] @ 4-byte Spill
+ mov r3, r9
+ umlal r6, r3, r4, r8
+ adcs r5, r10, r5
+ str r5, [sp, #68] @ 4-byte Spill
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs r5, r0, r5
+ ldr r0, [sp, #16] @ 4-byte Reload
+ str r5, [sp, #64] @ 4-byte Spill
+ ldr r5, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ umull r7, r0, r4, r12
+ mov r12, r1
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r11, r0, r4, r8
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [sp, #96] @ 4-byte Reload
+ umull r1, r5, r4, r12
+ adds r2, r2, r11
+ adcs r0, r0, r9
+ ldr r2, [sp, #20] @ 4-byte Reload
+ ldr r0, [sp, #28] @ 4-byte Reload
+ umull lr, r10, r4, r7
+ ldr r4, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ adcs r1, r5, lr
+ ldr r5, [sp, #24] @ 4-byte Reload
+ adcs r2, r10, r2
+ adc lr, r5, #0
+ ldr r5, [sp, #32] @ 4-byte Reload
+ adds r4, r5, r4
+ ldr r5, [sp, #76] @ 4-byte Reload
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adcs r9, r6, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adcs r3, r3, r4
+ str r3, [sp, #68] @ 4-byte Spill
+ ldr r3, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ mul r0, r9, r1
+ umull r2, r4, r0, r5
+ umull r1, r3, r0, r8
+ adds r1, r4, r1
+ str r2, [sp, #92] @ 4-byte Spill
+ umull r1, r2, r0, r6
+ adcs r3, r3, r1
+ umlal r4, r1, r0, r8
+ umull r3, lr, r0, r12
+ adcs r10, r2, r3
+ umull r3, r2, r0, r7
+ adcs r11, lr, r3
+ ldr lr, [sp, #84] @ 4-byte Reload
+ umull r7, r3, r0, lr
+ adcs r2, r2, r7
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adc r0, r3, #0
+ ldr r3, [sp, #92] @ 4-byte Reload
+ adds r3, r3, r9
+ ldr r3, [sp, #68] @ 4-byte Reload
+ adcs r3, r4, r3
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r12, r1, r7
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r10, r10, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r9, r11, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r7, r2, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r11, r0, #0
+ subs r0, r3, r5
+ sbcs r5, r12, r8
+ mov r8, r7
+ sbcs r2, r10, r6
+ ldr r6, [sp, #96] @ 4-byte Reload
+ sbcs r4, r9, r4
+ sbcs r6, r7, r6
+ sbcs r7, r1, lr
+ mov lr, r1
+ sbc r1, r11, #0
+ ands r1, r1, #1
+ movne r0, r3
+ ldr r3, [sp, #72] @ 4-byte Reload
+ movne r5, r12
+ movne r2, r10
+ cmp r1, #0
+ movne r4, r9
+ movne r6, r8
+ movne r7, lr
+ str r0, [r3]
+ str r5, [r3, #4]
+ str r2, [r3, #8]
+ str r4, [r3, #12]
+ str r6, [r3, #16]
+ str r7, [r3, #20]
+ add sp, sp, #100
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end85:
+ .size mcl_fp_montRed6L, .Lfunc_end85-mcl_fp_montRed6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre6L
+ .align 2
+ .type mcl_fp_addPre6L,%function
+mcl_fp_addPre6L: @ @mcl_fp_addPre6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, lr}
+ ldm r1, {r9, r12, lr}
+ ldr r10, [r1, #12]
+ ldr r5, [r1, #16]
+ ldr r8, [r1, #20]
+ ldm r2, {r6, r7}
+ add r4, r2, #8
+ ldm r4, {r1, r3, r4}
+ ldr r2, [r2, #20]
+ adds r6, r6, r9
+ adcs r7, r7, r12
+ add r12, r0, #8
+ adcs r1, r1, lr
+ stm r0, {r6, r7}
+ adcs r3, r3, r10
+ adcs r5, r4, r5
+ adcs r2, r2, r8
+ stm r12, {r1, r3, r5}
+ str r2, [r0, #20]
+ mov r0, #0
+ adc r0, r0, #0
+ pop {r4, r5, r6, r7, r8, r9, r10, lr}
+ mov pc, lr
+.Lfunc_end86:
+ .size mcl_fp_addPre6L, .Lfunc_end86-mcl_fp_addPre6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subPre6L
+ .align 2
+ .type mcl_fp_subPre6L,%function
+mcl_fp_subPre6L: @ @mcl_fp_subPre6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, lr}
+ ldm r2, {r9, r12, lr}
+ ldr r10, [r2, #12]
+ ldr r5, [r2, #16]
+ ldr r8, [r2, #20]
+ ldm r1, {r6, r7}
+ add r4, r1, #8
+ ldm r4, {r2, r3, r4}
+ ldr r1, [r1, #20]
+ subs r6, r6, r9
+ sbcs r7, r7, r12
+ add r12, r0, #8
+ sbcs r2, r2, lr
+ stm r0, {r6, r7}
+ sbcs r3, r3, r10
+ sbcs r5, r4, r5
+ sbcs r1, r1, r8
+ stm r12, {r2, r3, r5}
+ str r1, [r0, #20]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ pop {r4, r5, r6, r7, r8, r9, r10, lr}
+ mov pc, lr
+.Lfunc_end87:
+ .size mcl_fp_subPre6L, .Lfunc_end87-mcl_fp_subPre6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_6L
+ .align 2
+ .type mcl_fp_shr1_6L,%function
+mcl_fp_shr1_6L: @ @mcl_fp_shr1_6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, lr}
+ push {r4, r5, r6, lr}
+ ldr r3, [r1, #4]
+ ldr r12, [r1]
+ ldr lr, [r1, #12]
+ ldr r2, [r1, #8]
+ ldr r4, [r1, #16]
+ ldr r1, [r1, #20]
+ lsrs r5, r3, #1
+ lsr r3, r3, #1
+ rrx r12, r12
+ lsrs r5, lr, #1
+ orr r6, r3, r2, lsl #31
+ lsr r5, lr, #1
+ rrx r2, r2
+ lsrs r3, r1, #1
+ lsr r1, r1, #1
+ str r12, [r0]
+ str r6, [r0, #4]
+ orr r5, r5, r4, lsl #31
+ rrx r3, r4
+ str r2, [r0, #8]
+ str r5, [r0, #12]
+ str r3, [r0, #16]
+ str r1, [r0, #20]
+ pop {r4, r5, r6, lr}
+ mov pc, lr
+.Lfunc_end88:
+ .size mcl_fp_shr1_6L, .Lfunc_end88-mcl_fp_shr1_6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add6L
+ .align 2
+ .type mcl_fp_add6L,%function
+mcl_fp_add6L: @ @mcl_fp_add6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldm r1, {r9, r12, lr}
+ ldr r7, [r2]
+ ldr r10, [r1, #12]
+ ldr r11, [r1, #16]
+ ldr r8, [r1, #20]
+ ldmib r2, {r1, r4, r5, r6}
+ ldr r2, [r2, #20]
+ adds r7, r7, r9
+ adcs r12, r1, r12
+ add r1, r0, #8
+ adcs r4, r4, lr
+ stm r0, {r7, r12}
+ adcs r5, r5, r10
+ adcs r6, r6, r11
+ stm r1, {r4, r5, r6}
+ adcs r2, r2, r8
+ mov r1, #0
+ str r2, [r0, #20]
+ adc r9, r1, #0
+ ldm r3, {r1, lr}
+ ldr r10, [r3, #8]
+ ldr r11, [r3, #12]
+ ldr r8, [r3, #16]
+ ldr r3, [r3, #20]
+ subs r7, r7, r1
+ sbcs r1, r12, lr
+ sbcs r10, r4, r10
+ sbcs r12, r5, r11
+ sbcs lr, r6, r8
+ sbcs r4, r2, r3
+ sbc r2, r9, #0
+ tst r2, #1
+ streq r7, [r0]
+ stmibeq r0, {r1, r10, r12, lr}
+ streq r4, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end89:
+ .size mcl_fp_add6L, .Lfunc_end89-mcl_fp_add6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF6L
+ .align 2
+ .type mcl_fp_addNF6L,%function
+mcl_fp_addNF6L: @ @mcl_fp_addNF6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ add r11, r1, #8
+ ldm r1, {r12, lr}
+ ldm r11, {r9, r10, r11}
+ ldr r7, [r2]
+ ldr r8, [r1, #20]
+ ldmib r2, {r1, r4, r5, r6}
+ ldr r2, [r2, #20]
+ adds r7, r7, r12
+ adcs r1, r1, lr
+ adcs r4, r4, r9
+ adcs r9, r5, r10
+ adcs lr, r6, r11
+ add r11, r3, #8
+ adc r12, r2, r8
+ ldm r3, {r2, r6}
+ ldm r11, {r5, r8, r10, r11}
+ subs r2, r7, r2
+ sbcs r6, r1, r6
+ sbcs r5, r4, r5
+ sbcs r3, r9, r8
+ sbcs r8, lr, r10
+ sbc r10, r12, r11
+ asr r11, r10, #31
+ cmp r11, #0
+ movlt r2, r7
+ movlt r6, r1
+ movlt r5, r4
+ cmp r11, #0
+ movlt r3, r9
+ movlt r8, lr
+ movlt r10, r12
+ str r2, [r0]
+ str r6, [r0, #4]
+ str r5, [r0, #8]
+ str r3, [r0, #12]
+ str r8, [r0, #16]
+ str r10, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end90:
+ .size mcl_fp_addNF6L, .Lfunc_end90-mcl_fp_addNF6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sub6L
+ .align 2
+ .type mcl_fp_sub6L,%function
+mcl_fp_sub6L: @ @mcl_fp_sub6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldr r9, [r2]
+ ldmib r2, {r8, r12, lr}
+ ldr r10, [r2, #16]
+ ldr r11, [r2, #20]
+ ldm r1, {r2, r4, r5, r6, r7}
+ ldr r1, [r1, #20]
+ subs r9, r2, r9
+ sbcs r2, r4, r8
+ str r9, [r0]
+ sbcs r12, r5, r12
+ sbcs lr, r6, lr
+ sbcs r4, r7, r10
+ stmib r0, {r2, r12, lr}
+ sbcs r5, r1, r11
+ mov r1, #0
+ str r4, [r0, #16]
+ sbc r1, r1, #0
+ str r5, [r0, #20]
+ tst r1, #1
+ popeq {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ moveq pc, lr
+ ldm r3, {r1, r6, r7, r8, r10}
+ ldr r3, [r3, #20]
+ adds r1, r1, r9
+ adcs r2, r6, r2
+ adcs r7, r7, r12
+ adcs r6, r8, lr
+ stm r0, {r1, r2, r7}
+ adcs r4, r10, r4
+ str r6, [r0, #12]
+ adc r3, r3, r5
+ str r4, [r0, #16]
+ str r3, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end91:
+ .size mcl_fp_sub6L, .Lfunc_end91-mcl_fp_sub6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF6L
+ .align 2
+ .type mcl_fp_subNF6L,%function
+mcl_fp_subNF6L: @ @mcl_fp_subNF6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ add r11, r2, #8
+ ldm r2, {r12, lr}
+ ldm r11, {r9, r10, r11}
+ ldr r7, [r1]
+ ldr r8, [r2, #20]
+ ldmib r1, {r2, r4, r5, r6}
+ ldr r1, [r1, #20]
+ subs r7, r7, r12
+ sbcs r2, r2, lr
+ sbcs r9, r4, r9
+ sbcs lr, r5, r10
+ ldr r5, [r3, #4]
+ sbcs r12, r6, r11
+ ldr r6, [r3]
+ add r11, r3, #8
+ sbc r1, r1, r8
+ ldm r11, {r4, r8, r10, r11}
+ adds r6, r7, r6
+ adcs r5, r2, r5
+ adcs r4, r9, r4
+ adcs r3, lr, r8
+ adcs r8, r12, r10
+ adc r10, r1, r11
+ asr r11, r1, #31
+ cmp r11, #0
+ movge r6, r7
+ movge r5, r2
+ movge r4, r9
+ cmp r11, #0
+ movge r3, lr
+ movge r8, r12
+ movge r10, r1
+ str r6, [r0]
+ str r5, [r0, #4]
+ str r4, [r0, #8]
+ str r3, [r0, #12]
+ str r8, [r0, #16]
+ str r10, [r0, #20]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end92:
+ .size mcl_fp_subNF6L, .Lfunc_end92-mcl_fp_subNF6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add6L
+ .align 2
+ .type mcl_fpDbl_add6L,%function
+mcl_fpDbl_add6L: @ @mcl_fpDbl_add6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #32
+ sub sp, sp, #32
+ ldm r1, {r12, lr}
+ ldr r8, [r1, #8]
+ ldr r10, [r1, #12]
+ ldmib r2, {r6, r7}
+ ldr r5, [r2, #16]
+ ldr r11, [r2]
+ ldr r4, [r2, #12]
+ str r5, [sp] @ 4-byte Spill
+ ldr r5, [r2, #20]
+ adds r9, r11, r12
+ add r11, r1, #32
+ adcs r6, r6, lr
+ add lr, r1, #16
+ adcs r7, r7, r8
+ str r5, [sp, #4] @ 4-byte Spill
+ ldr r5, [r2, #24]
+ str r5, [sp, #16] @ 4-byte Spill
+ ldr r5, [r2, #28]
+ str r5, [sp, #28] @ 4-byte Spill
+ ldr r5, [r2, #32]
+ str r5, [sp, #8] @ 4-byte Spill
+ ldr r5, [r2, #36]
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [r2, #40]
+ ldr r2, [r2, #44]
+ str r5, [sp, #20] @ 4-byte Spill
+ str r2, [sp, #24] @ 4-byte Spill
+ adcs r5, r4, r10
+ ldm r11, {r4, r8, r11}
+ ldr r10, [r1, #44]
+ ldm lr, {r1, r2, r12, lr}
+ str r9, [r0]
+ stmib r0, {r6, r7}
+ ldr r6, [sp] @ 4-byte Reload
+ str r5, [r0, #12]
+ ldr r5, [sp, #4] @ 4-byte Reload
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r1, r6, r1
+ adcs r2, r5, r2
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r5, [r3]
+ str r2, [r0, #20]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r1, r1, r12
+ adcs r2, r2, lr
+ adcs r12, r7, r4
+ ldr r7, [sp, #12] @ 4-byte Reload
+ mov r4, #0
+ adcs r9, r7, r8
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r8, r7, r11
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adcs lr, r7, r10
+ adc r7, r4, #0
+ ldmib r3, {r4, r6, r10, r11}
+ subs r5, r1, r5
+ ldr r3, [r3, #20]
+ sbcs r4, r2, r4
+ sbcs r6, r12, r6
+ sbcs r10, r9, r10
+ sbcs r11, r8, r11
+ sbcs r3, lr, r3
+ sbc r7, r7, #0
+ ands r7, r7, #1
+ movne r5, r1
+ movne r4, r2
+ movne r6, r12
+ cmp r7, #0
+ add r1, r0, #32
+ movne r10, r9
+ movne r11, r8
+ movne r3, lr
+ str r5, [r0, #24]
+ str r4, [r0, #28]
+ stm r1, {r6, r10, r11}
+ str r3, [r0, #44]
+ add sp, sp, #32
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end93:
+ .size mcl_fpDbl_add6L, .Lfunc_end93-mcl_fpDbl_add6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub6L
+ .align 2
+ .type mcl_fpDbl_sub6L,%function
+mcl_fpDbl_sub6L: @ @mcl_fpDbl_sub6L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ ldr r6, [r2, #8]
+ ldr r7, [r2, #32]
+ add r10, r1, #12
+ str r6, [sp] @ 4-byte Spill
+ ldr r6, [r2, #12]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r6, [sp, #8] @ 4-byte Spill
+ ldr r6, [r2, #20]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r6, [sp, #12] @ 4-byte Spill
+ ldr r6, [r2, #24]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r1, #44]
+ str r6, [sp, #16] @ 4-byte Spill
+ ldr r6, [r2, #28]
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r2, #4]
+ ldr r2, [r2]
+ str r6, [sp, #20] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldm r1, {r11, r12, lr}
+ ldr r6, [sp] @ 4-byte Reload
+ subs r2, r11, r2
+ ldr r11, [r1, #40]
+ sbcs r7, r12, r7
+ ldr r12, [r1, #36]
+ ldr r1, [r1, #32]
+ sbcs lr, lr, r6
+ ldr r6, [sp, #4] @ 4-byte Reload
+ stm r0, {r2, r7, lr}
+ mov lr, #0
+ ldr r2, [sp, #8] @ 4-byte Reload
+ sbcs r4, r4, r6
+ str r4, [r0, #12]
+ sbcs r2, r5, r2
+ ldr r5, [sp, #24] @ 4-byte Reload
+ str r2, [r0, #16]
+ ldr r2, [sp, #12] @ 4-byte Reload
+ sbcs r2, r8, r2
+ str r2, [r0, #20]
+ ldr r2, [sp, #16] @ 4-byte Reload
+ sbcs r7, r9, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ sbcs r6, r10, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ sbcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ sbcs r10, r12, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ sbcs r9, r11, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ sbcs r8, r5, r2
+ sbc r12, lr, #0
+ ldm r3, {r2, r4, r5, lr}
+ ldr r11, [r3, #16]
+ ldr r3, [r3, #20]
+ adds r2, r7, r2
+ adcs r4, r6, r4
+ adcs r5, r1, r5
+ adcs lr, r10, lr
+ adcs r11, r9, r11
+ adc r3, r8, r3
+ ands r12, r12, #1
+ moveq r2, r7
+ moveq r4, r6
+ moveq r5, r1
+ cmp r12, #0
+ moveq lr, r10
+ moveq r11, r9
+ moveq r3, r8
+ str r2, [r0, #24]
+ str r4, [r0, #28]
+ str r5, [r0, #32]
+ str lr, [r0, #36]
+ str r11, [r0, #40]
+ str r3, [r0, #44]
+ add sp, sp, #44
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end94:
+ .size mcl_fpDbl_sub6L, .Lfunc_end94-mcl_fpDbl_sub6L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre7L
+ .align 2
+ .type mcl_fp_mulUnitPre7L,%function
+mcl_fp_mulUnitPre7L: @ @mcl_fp_mulUnitPre7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r11, [r1, #12]
+ ldr r10, [r1, #16]
+ ldr r9, [r1, #20]
+ ldr r8, [r1, #24]
+ umull r7, r1, lr, r2
+ umull lr, r4, r12, r2
+ mov r5, r4
+ mov r6, r7
+ str lr, [r0]
+ umlal r5, r6, r3, r2
+ stmib r0, {r5, r6}
+ umull r6, r5, r3, r2
+ adds r3, r4, r6
+ umull r3, r6, r11, r2
+ adcs r7, r5, r7
+ adcs r1, r1, r3
+ str r1, [r0, #12]
+ umull r1, r3, r10, r2
+ adcs r1, r6, r1
+ str r1, [r0, #16]
+ umull r1, r7, r9, r2
+ adcs r1, r3, r1
+ str r1, [r0, #20]
+ umull r1, r3, r8, r2
+ adcs r1, r7, r1
+ str r1, [r0, #24]
+ adc r1, r3, #0
+ str r1, [r0, #28]
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end95:
+ .size mcl_fp_mulUnitPre7L, .Lfunc_end95-mcl_fp_mulUnitPre7L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre7L
+ .align 2
+ .type mcl_fpDbl_mulPre7L,%function
+mcl_fpDbl_mulPre7L: @ @mcl_fpDbl_mulPre7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #68
+ sub sp, sp, #68
+ mov r3, r2
+ ldr r7, [r1]
+ ldr lr, [r1, #4]
+ mov r9, r0
+ ldr r0, [r1, #8]
+ ldr r2, [r1, #12]
+ ldr r10, [r1, #16]
+ ldr r8, [r1, #20]
+ str r3, [sp, #64] @ 4-byte Spill
+ ldr r3, [r3]
+ str r9, [sp, #60] @ 4-byte Spill
+ str r7, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ str r2, [sp, #44] @ 4-byte Spill
+ umull r5, r4, r7, r3
+ umull r6, r12, lr, r3
+ adds r6, r4, r6
+ str r5, [sp, #48] @ 4-byte Spill
+ umull r5, r6, r0, r3
+ adcs r7, r12, r5
+ umlal r4, r5, lr, r3
+ umull r7, r11, r2, r3
+ adcs r0, r6, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ umull r6, r0, r10, r3
+ adcs r2, r11, r6
+ umull r11, r7, r8, r3
+ ldr r6, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r2, [sp, #40] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ umull r11, r12, r0, r3
+ adcs r2, r7, r11
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r2, [r9]
+ ldr r2, [sp, #64] @ 4-byte Reload
+ ldr r3, [r2, #4]
+ umull r11, r7, r6, r3
+ str r7, [sp, #32] @ 4-byte Spill
+ adc r7, r12, #0
+ str r7, [sp, #16] @ 4-byte Spill
+ adds r7, r11, r4
+ str r7, [sp, #48] @ 4-byte Spill
+ umull r4, r7, lr, r3
+ str r7, [sp, #28] @ 4-byte Spill
+ adcs r7, r4, r5
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [sp, #44] @ 4-byte Reload
+ umull r4, r5, r7, r3
+ ldr r7, [sp, #56] @ 4-byte Reload
+ str r5, [sp, #24] @ 4-byte Spill
+ umull r5, r6, r7, r3
+ ldr r7, [sp, #52] @ 4-byte Reload
+ str r6, [sp, #44] @ 4-byte Spill
+ ldr r6, [sp, #20] @ 4-byte Reload
+ adcs r11, r5, r7
+ ldr r7, [sp, #40] @ 4-byte Reload
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adcs lr, r4, r7
+ umull r9, r7, r10, r3
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r7, r9, r7
+ umull r4, r9, r8, r3
+ adcs r4, r4, r6
+ umull r6, r12, r0, r3
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r3, r6, r0
+ mov r0, #0
+ adc r6, r0, #0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adds r8, r5, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r5, r11, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, lr, r0
+ ldr lr, [r1, #12]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r7, r7, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r7, [sp, #24] @ 4-byte Spill
+ adcs r7, r4, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r4, [r1, #4]
+ adcs r3, r3, r9
+ ldr r9, [r1, #8]
+ str r7, [sp, #36] @ 4-byte Spill
+ str r3, [sp, #40] @ 4-byte Spill
+ adc r3, r6, r12
+ ldr r6, [r2, #8]
+ str r3, [sp, #44] @ 4-byte Spill
+ ldr r3, [sp, #48] @ 4-byte Reload
+ str r4, [sp, #52] @ 4-byte Spill
+ str r3, [r0, #4]
+ ldr r3, [r1]
+ umull r12, r7, r3, r6
+ str r3, [sp, #56] @ 4-byte Spill
+ str r7, [sp, #32] @ 4-byte Spill
+ adds r3, r12, r8
+ umull r7, r0, r4, r6
+ ldr r12, [r1, #24]
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r2, r7, r5
+ umull r7, r0, r9, r6
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r10, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ umull r5, r0, lr, r6
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ umull r11, r3, r0, r6
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [r1, #20]
+ adcs r11, r11, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ umull r8, r4, r3, r6
+ adcs r8, r8, r0
+ umull r7, r0, r12, r6
+ ldr r6, [sp, #44] @ 4-byte Reload
+ adcs r6, r7, r6
+ mov r7, #0
+ adc r7, r7, #0
+ adds r2, r2, r10
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r2, r5, r2
+ ldr r5, [sp, #4] @ 4-byte Reload
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r10, r5, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r11, r11, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r2, r8, r2
+ ldr r8, [sp, #56] @ 4-byte Reload
+ str r2, [sp, #28] @ 4-byte Spill
+ adcs r2, r6, r4
+ adc r0, r7, r0
+ ldr r7, [sp, #60] @ 4-byte Reload
+ str r2, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ str r0, [r7, #8]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r6, [r0, #12]
+ umull r2, r4, lr, r6
+ str r4, [sp, #48] @ 4-byte Spill
+ umull lr, r4, r9, r6
+ str r4, [sp, #44] @ 4-byte Spill
+ ldr r4, [sp, #52] @ 4-byte Reload
+ umull r9, r5, r4, r6
+ str r5, [sp, #32] @ 4-byte Spill
+ umull r4, r5, r8, r6
+ str r5, [sp, #52] @ 4-byte Spill
+ ldr r5, [sp] @ 4-byte Reload
+ adds r4, r4, r5
+ umull r5, r8, r3, r6
+ str r4, [sp, #56] @ 4-byte Spill
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adcs r9, r9, r4
+ adcs lr, lr, r10
+ adcs r11, r2, r11
+ ldr r2, [sp, #24] @ 4-byte Reload
+ umull r4, r10, r2, r6
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r4, r4, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adcs r3, r5, r2
+ umull r5, r2, r12, r6
+ ldr r6, [sp, #40] @ 4-byte Reload
+ adcs r12, r5, r6
+ ldr r6, [sp, #52] @ 4-byte Reload
+ mov r5, #0
+ adc r5, r5, #0
+ adds r9, r9, r6
+ ldr r6, [sp, #32] @ 4-byte Reload
+ adcs lr, lr, r6
+ ldr r6, [sp, #44] @ 4-byte Reload
+ adcs r6, r11, r6
+ ldr r11, [r1, #8]
+ str r6, [sp, #20] @ 4-byte Spill
+ ldr r6, [sp, #48] @ 4-byte Reload
+ adcs r4, r4, r6
+ adcs r3, r3, r10
+ str r4, [sp, #28] @ 4-byte Spill
+ ldr r4, [r1, #12]
+ adcs r12, r12, r8
+ str r3, [sp, #40] @ 4-byte Spill
+ adc r2, r5, r2
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r2, [r7, #12]
+ ldr r7, [r0, #16]
+ ldr r0, [r1]
+ ldr r2, [r1, #4]
+ umull r8, r3, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ str r2, [sp, #52] @ 4-byte Spill
+ adds r0, r8, r9
+ str r3, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ umull r6, r0, r2, r7
+ ldr r2, [r1, #24]
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r6, lr
+ ldr lr, [r1, #16]
+ str r0, [sp, #16] @ 4-byte Spill
+ umull r6, r0, r11, r7
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r6, r0
+ mov r6, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ umull r3, r0, r4, r7
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r3, r0
+ ldr r3, [r1, #20]
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r10, r0, lr, r7
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ umull r9, r5, r3, r7
+ adcs r10, r10, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r9, r9, r12
+ umull r8, r12, r2, r7
+ adcs r7, r8, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r8, r6, #0
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adds r0, r6, r0
+ ldr r6, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r10, r10, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r7, r5
+ ldr r7, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ adc r0, r8, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ str r7, [r0, #16]
+ ldr r7, [sp, #64] @ 4-byte Reload
+ ldr r7, [r7, #20]
+ umull r8, r6, r4, r7
+ str r6, [sp, #48] @ 4-byte Spill
+ umull r4, r6, r11, r7
+ str r6, [sp, #40] @ 4-byte Spill
+ ldr r6, [sp, #52] @ 4-byte Reload
+ umull r11, r5, r6, r7
+ ldr r6, [sp, #56] @ 4-byte Reload
+ str r5, [sp, #28] @ 4-byte Spill
+ umull r5, r9, r6, r7
+ ldr r6, [sp, #44] @ 4-byte Reload
+ adds r6, r5, r6
+ str r6, [sp, #44] @ 4-byte Spill
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adcs r11, r11, r6
+ ldr r6, [sp, #12] @ 4-byte Reload
+ adcs r12, r4, r6
+ ldr r6, [sp, #24] @ 4-byte Reload
+ adcs r10, r8, r10
+ umull r5, r8, lr, r7
+ umull r4, lr, r3, r7
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r5, r5, r6
+ adcs r3, r4, r3
+ umull r4, r6, r2, r7
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adcs r2, r4, r2
+ mov r4, #0
+ adc r4, r4, #0
+ adds r7, r11, r9
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r7, r12, r7
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [sp, #40] @ 4-byte Reload
+ adcs r9, r10, r7
+ ldr r7, [sp, #48] @ 4-byte Reload
+ adcs r11, r5, r7
+ adcs r3, r3, r8
+ adcs r2, r2, lr
+ str r3, [sp, #40] @ 4-byte Spill
+ str r2, [sp, #52] @ 4-byte Spill
+ adc r2, r4, r6
+ ldr r6, [r1]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r4, [r0, #24]
+ ldmib r1, {r0, r3, r5}
+ umull r12, r2, r5, r4
+ str r2, [sp, #64] @ 4-byte Spill
+ umull r5, r2, r3, r4
+ umull r3, r10, r0, r4
+ umull r0, r8, r6, r4
+ ldr r6, [r1, #16]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adds r0, r0, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs lr, r3, r0
+ adcs r9, r5, r9
+ adcs r11, r12, r11
+ umull r0, r12, r6, r4
+ ldr r6, [r1, #20]
+ ldr r1, [r1, #24]
+ adcs r0, r0, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ umull r3, r5, r6, r4
+ umull r6, r7, r1, r4
+ ldr r1, [sp, #56] @ 4-byte Reload
+ mov r4, #0
+ adcs r3, r3, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adcs r1, r6, r1
+ adc r4, r4, #0
+ adds r6, lr, r8
+ adcs lr, r9, r10
+ adcs r8, r11, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ adcs r3, r3, r12
+ adcs r1, r1, r5
+ ldr r5, [sp, #48] @ 4-byte Reload
+ adc r7, r4, r7
+ add r12, r2, #24
+ stm r12, {r5, r6, lr}
+ str r8, [r2, #36]
+ str r0, [r2, #40]
+ str r3, [r2, #44]
+ str r1, [r2, #48]
+ str r7, [r2, #52]
+ add sp, sp, #68
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end96:
+ .size mcl_fpDbl_mulPre7L, .Lfunc_end96-mcl_fpDbl_mulPre7L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre7L
+ .align 2
+ .type mcl_fpDbl_sqrPre7L,%function
+mcl_fpDbl_sqrPre7L: @ @mcl_fpDbl_sqrPre7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #60
+ sub sp, sp, #60
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ ldr r9, [r1, #20]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r1, {r2, r3}
+ ldr r0, [r1, #8]
+ ldr r11, [r1, #12]
+ umull r6, r7, r2, r2
+ str r0, [sp, #48] @ 4-byte Spill
+ umull r5, r4, r0, r2
+ umull r12, r0, r3, r2
+ umull r8, r10, r11, r2
+ adds lr, r7, r12
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ adcs r6, r0, r5
+ umlal r7, r5, r3, r2
+ adcs r0, r4, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ umull r4, r6, r0, r2
+ adcs r4, r10, r4
+ mov r10, r9
+ str r4, [sp, #40] @ 4-byte Spill
+ umull r4, r8, r10, r2
+ adcs r6, r6, r4
+ str r6, [sp, #28] @ 4-byte Spill
+ ldr r6, [r1, #24]
+ umull lr, r9, r6, r2
+ adcs r4, r8, lr
+ str r4, [sp, #20] @ 4-byte Spill
+ adc r4, r9, #0
+ adds r2, r12, r7
+ ldr r12, [sp, #56] @ 4-byte Reload
+ str r2, [sp, #36] @ 4-byte Spill
+ umull r2, r7, r3, r3
+ adcs r2, r2, r5
+ str r7, [sp, #16] @ 4-byte Spill
+ umull r5, r8, r11, r3
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r2, [r12]
+ umull lr, r2, r6, r3
+ str r2, [sp, #32] @ 4-byte Spill
+ umull r6, r2, r10, r3
+ str r2, [sp, #24] @ 4-byte Spill
+ umull r2, r10, r0, r3
+ ldr r0, [sp, #48] @ 4-byte Reload
+ umull r7, r9, r0, r3
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r3, r7, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r5, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ mov r5, #0
+ adcs r2, r2, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r6, r6, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs lr, lr, r4
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r11, r4, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ ldr r4, [r1, #4]
+ adcs r3, r3, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ str r4, [sp, #44] @ 4-byte Spill
+ adcs r7, r7, r9
+ adcs r9, r2, r8
+ ldr r2, [r1, #12]
+ str r0, [r12, #4]
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r12, r6, r10
+ adcs r10, lr, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr lr, [r1, #8]
+ adc r0, r5, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1]
+ umull r8, r5, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ adds r0, r8, r11
+ str r5, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ umull r5, r0, r4, lr
+ ldr r4, [r1, #16]
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r5, r3
+ str r0, [sp, #20] @ 4-byte Spill
+ umull r3, r0, lr, lr
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r3, r7
+ ldr r3, [r1, #20]
+ ldr r7, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ umull r0, r5, r2, lr
+ str r0, [sp, #12] @ 4-byte Spill
+ adcs r0, r0, r9
+ ldr r9, [sp, #20] @ 4-byte Reload
+ str r5, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #4] @ 4-byte Spill
+ umull r11, r0, r4, lr
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r8, r0, r3, lr
+ adcs r11, r11, r12
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ adcs r8, r8, r10
+ umull r10, r12, r0, lr
+ adcs lr, r10, r7
+ mov r7, #0
+ adc r10, r7, #0
+ ldr r7, [sp, #32] @ 4-byte Reload
+ adds r6, r9, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r6, [sp, #20] @ 4-byte Spill
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adcs r6, r6, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r6, [sp, #16] @ 4-byte Spill
+ ldr r6, [sp, #4] @ 4-byte Reload
+ adcs r6, r6, r7
+ adcs r11, r11, r5
+ ldr r5, [sp, #8] @ 4-byte Reload
+ adcs r5, r8, r5
+ str r5, [sp, #8] @ 4-byte Spill
+ ldr r5, [sp] @ 4-byte Reload
+ adcs r7, lr, r5
+ str r7, [sp, #4] @ 4-byte Spill
+ adc r7, r10, r12
+ ldr r10, [sp, #48] @ 4-byte Reload
+ str r7, [sp] @ 4-byte Spill
+ umull r9, r7, r0, r2
+ umull r5, r0, r3, r2
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ umull r3, r0, r4, r2
+ str r0, [sp, #28] @ 4-byte Spill
+ umull r4, r0, r2, r2
+ str r0, [sp, #24] @ 4-byte Spill
+ umull r8, lr, r10, r2
+ umull r0, r12, r7, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ mov r7, #0
+ adds r8, r8, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ adcs r6, r2, r6
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r4, r4, r11
+ adcs r3, r3, r2
+ ldr r2, [sp, #4] @ 4-byte Reload
+ adcs r5, r5, r2
+ ldr r2, [sp] @ 4-byte Reload
+ adcs r2, r9, r2
+ adc r9, r7, #0
+ adds r0, r0, lr
+ adcs r7, r6, r12
+ ldr r6, [sp, #36] @ 4-byte Reload
+ adcs r4, r4, r6
+ ldr r6, [sp, #24] @ 4-byte Reload
+ adcs r11, r3, r6
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r12, r5, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ ldr r5, [r1, #12]
+ adcs r10, r2, r3
+ ldr r2, [sp, #40] @ 4-byte Reload
+ ldr r3, [sp, #56] @ 4-byte Reload
+ adc r2, r9, r2
+ ldr r9, [r1, #4]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r9, [sp, #16] @ 4-byte Spill
+ str r2, [r3, #8]
+ str r8, [r3, #12]
+ ldr r2, [r1]
+ ldr r3, [r1, #16]
+ ldr r8, [r1, #8]
+ umull lr, r6, r2, r3
+ str r2, [sp, #48] @ 4-byte Spill
+ str r8, [sp, #4] @ 4-byte Spill
+ adds r0, lr, r0
+ ldr lr, [r1, #24]
+ str r6, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ umull r0, r2, r9, r3
+ adcs r0, r0, r7
+ str r2, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ umull r7, r0, r8, r3
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r7, r4
+ ldr r9, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r7, r0, r5, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r7, r11
+ mov r7, #0
+ str r0, [sp] @ 4-byte Spill
+ umull r11, r0, r3, r3
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r1, #20]
+ adcs r11, r11, r12
+ umull r12, r2, r0, r3
+ adcs r4, r12, r10
+ umull r10, r8, lr, r3
+ ldr r3, [sp, #44] @ 4-byte Reload
+ str r2, [sp, #40] @ 4-byte Spill
+ adcs r3, r10, r3
+ adc r10, r7, #0
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adds r6, r9, r7
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r6, [sp, #36] @ 4-byte Spill
+ ldr r6, [sp, #8] @ 4-byte Reload
+ adcs r6, r6, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r6, [sp, #20] @ 4-byte Spill
+ ldr r6, [sp] @ 4-byte Reload
+ adcs r6, r6, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r6, [sp, #8] @ 4-byte Spill
+ adcs r11, r11, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r4, r4, r7
+ adcs r2, r3, r2
+ ldr r3, [sp, #4] @ 4-byte Reload
+ str r2, [sp, #24] @ 4-byte Spill
+ umull r6, r2, r5, r0
+ adc r10, r10, r8
+ str r2, [sp, #44] @ 4-byte Spill
+ umull r5, r2, r3, r0
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [sp, #16] @ 4-byte Reload
+ umull r8, r3, r2, r0
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r3, [sp, #28] @ 4-byte Spill
+ umull r3, r9, r2, r0
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adds r2, r3, r2
+ ldr r3, [sp, #24] @ 4-byte Reload
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r7, r8, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r5, r5, r2
+ adcs r6, r6, r11
+ adcs r2, r12, r4
+ umull r4, r8, r0, r0
+ adcs r4, r4, r3
+ umull r3, r11, lr, r0
+ adcs r0, r3, r10
+ mov r3, #0
+ adc r3, r3, #0
+ adds r7, r7, r9
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r9, r5, r7
+ ldr r5, [sp, #32] @ 4-byte Reload
+ adcs r6, r6, r5
+ str r6, [sp, #32] @ 4-byte Spill
+ ldr r6, [sp, #44] @ 4-byte Reload
+ adcs r10, r2, r6
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adcs r12, r4, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ adc r0, r3, r11
+ ldr r3, [r1, #24]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r0, [r2, #16]
+ ldr r0, [sp, #36] @ 4-byte Reload
+ str r0, [r2, #20]
+ ldm r1, {r0, r4}
+ ldr r5, [r1, #12]
+ ldr r2, [r1, #8]
+ umull lr, r6, r5, r3
+ umull r5, r11, r2, r3
+ umull r2, r8, r4, r3
+ str r6, [sp, #52] @ 4-byte Spill
+ umull r4, r6, r0, r3
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adds r0, r4, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r9, r2, r9
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r4, r5, r0
+ ldr r0, [r1, #16]
+ ldr r1, [r1, #20]
+ adcs r10, lr, r10
+ umull r7, lr, r0, r3
+ adcs r0, r7, r12
+ umull r7, r12, r1, r3
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r7, r1
+ umull r7, r5, r3, r3
+ ldr r3, [sp, #48] @ 4-byte Reload
+ adcs r3, r7, r3
+ mov r7, #0
+ adc r7, r7, #0
+ adds r6, r9, r6
+ adcs r4, r4, r8
+ adcs r8, r10, r11
+ adcs r0, r0, r2
+ adcs r1, r1, lr
+ adcs r2, r3, r12
+ adc r3, r7, r5
+ ldr r7, [sp, #56] @ 4-byte Reload
+ ldr r5, [sp, #40] @ 4-byte Reload
+ add r12, r7, #40
+ str r5, [r7, #24]
+ str r6, [r7, #28]
+ str r4, [r7, #32]
+ str r8, [r7, #36]
+ stm r12, {r0, r1, r2, r3}
+ add sp, sp, #60
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end97:
+ .size mcl_fpDbl_sqrPre7L, .Lfunc_end97-mcl_fpDbl_sqrPre7L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont7L
+ .align 2
+ .type mcl_fp_mont7L,%function
+mcl_fp_mont7L: @ @mcl_fp_mont7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #124
+ sub sp, sp, #124
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, r2
+ str r2, [sp, #60] @ 4-byte Spill
+ ldm r0, {r2, lr}
+ ldr r7, [r0, #8]
+ ldr r0, [r0, #12]
+ ldr r5, [r3, #-4]
+ ldr r6, [r3, #8]
+ ldr r9, [r3, #4]
+ ldr r11, [r1, #8]
+ ldr r12, [r1, #12]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r1, #4]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1]
+ str r5, [sp, #80] @ 4-byte Spill
+ str r6, [sp, #116] @ 4-byte Spill
+ str r9, [sp, #108] @ 4-byte Spill
+ str r11, [sp, #104] @ 4-byte Spill
+ str r12, [sp, #72] @ 4-byte Spill
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r3]
+ umull r4, r8, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ mul r0, r4, r5
+ str r4, [sp, #44] @ 4-byte Spill
+ umull r10, r4, r0, r6
+ str r4, [sp, #32] @ 4-byte Spill
+ str r10, [sp, #8] @ 4-byte Spill
+ umull r4, r5, r0, r7
+ str r7, [sp, #112] @ 4-byte Spill
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r4, [sp, #40] @ 4-byte Spill
+ mov r4, r5
+ str r5, [sp, #4] @ 4-byte Spill
+ umlal r4, r10, r0, r9
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r1, #24]
+ umull r6, r5, r4, r2
+ str r4, [sp, #88] @ 4-byte Spill
+ ldr r4, [r1, #20]
+ ldr r1, [r1, #16]
+ str r6, [sp, #96] @ 4-byte Spill
+ str r5, [sp, #120] @ 4-byte Spill
+ umull r6, r5, r4, r2
+ str r4, [sp, #64] @ 4-byte Spill
+ umull r9, r4, r1, r2
+ str r1, [sp, #100] @ 4-byte Spill
+ str r6, [sp, #76] @ 4-byte Spill
+ str r5, [sp, #92] @ 4-byte Spill
+ str r4, [sp, #20] @ 4-byte Spill
+ umull r6, r5, r12, r2
+ umull r12, r4, r11, r2
+ umull r11, r1, r7, r2
+ adds r7, r8, r11
+ adcs r7, r1, r12
+ adcs r1, r4, r6
+ ldr r4, [sp, #20] @ 4-byte Reload
+ ldr r6, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ adcs r1, r5, r9
+ ldr r5, [r3, #12]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r5, [sp, #76] @ 4-byte Spill
+ adcs r1, r4, r1
+ ldr r4, [sp, #92] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r4, r1
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #120] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ umull r9, r4, r0, r1
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ str r4, [sp] @ 4-byte Spill
+ ldr r4, [r3, #20]
+ umull r3, r7, r0, r6
+ ldr r6, [sp, #4] @ 4-byte Reload
+ str r1, [sp, #120] @ 4-byte Spill
+ adds r3, r6, r3
+ str r4, [sp, #92] @ 4-byte Spill
+ umull r3, r6, r0, r5
+ ldr r5, [sp, #8] @ 4-byte Reload
+ adcs r7, r7, r5
+ ldr r5, [sp, #32] @ 4-byte Reload
+ adcs r11, r5, r3
+ umull r7, r5, r0, r1
+ adcs r1, r6, r7
+ umull r7, r3, r0, r4
+ ldr r4, [sp] @ 4-byte Reload
+ ldr r6, [sp, #40] @ 4-byte Reload
+ adcs r0, r5, r7
+ ldr r5, [sp, #68] @ 4-byte Reload
+ adcs r3, r3, r9
+ adc r7, r4, #0
+ mov r4, #0
+ umlal r8, r12, r5, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adds r2, r6, r2
+ mov r6, r5
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, r8
+ str r2, [sp, #44] @ 4-byte Spill
+ adcs r2, r10, r12
+ ldr r10, [sp, #84] @ 4-byte Reload
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r2, r11, r2
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r3, r0
+ umull r2, r3, lr, r5
+ ldr r5, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ adc r0, r4, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ umull r12, r9, lr, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ umull r8, r4, lr, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ umull r1, r7, lr, r0
+ umull r11, r0, lr, r10
+ adds r2, r0, r2
+ adcs r2, r3, r1
+ umlal r0, r1, lr, r6
+ ldr r6, [sp, #40] @ 4-byte Reload
+ umull r2, r3, lr, r5
+ adcs r2, r7, r2
+ adcs r10, r3, r8
+ ldr r8, [sp, #64] @ 4-byte Reload
+ umull r7, r3, lr, r8
+ adcs r4, r4, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ adcs r3, r3, r12
+ adc r5, r9, #0
+ adds r7, r7, r11
+ adcs r0, r6, r0
+ ldr r6, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ mul r0, r7, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ umull lr, r12, r0, r6
+ umull r3, r4, r0, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ mov r2, r3
+ umull r9, r5, r0, r1
+ mov r1, r5
+ adds r5, r5, lr
+ umlal r1, r2, r0, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ adcs r3, r12, r3
+ umull r5, lr, r0, r6
+ ldr r6, [sp, #76] @ 4-byte Reload
+ umull r3, r12, r0, r6
+ ldr r6, [sp, #92] @ 4-byte Reload
+ adcs r3, r4, r3
+ adcs r12, r12, r5
+ umull r4, r5, r0, r6
+ adcs lr, lr, r4
+ umull r6, r4, r0, r10
+ adcs r0, r5, r6
+ adc r4, r4, #0
+ adds r5, r9, r7
+ ldr r9, [sp, #84] @ 4-byte Reload
+ ldr r5, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r3, r1
+ ldr r3, [sp, #68] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r12, r1
+ ldr r12, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, lr, r1
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ umull r2, r1, r12, r0
+ umull r10, r0, r12, r8
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r2, [sp, #8] @ 4-byte Spill
+ str r1, [sp, #12] @ 4-byte Spill
+ umull r2, lr, r12, r3
+ umull r7, r8, r12, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ umull r5, r6, r12, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ umull r1, r4, r12, r0
+ umull r11, r0, r12, r9
+ adds r2, r0, r2
+ str r11, [sp] @ 4-byte Spill
+ adcs r2, lr, r1
+ umlal r0, r1, r12, r3
+ adcs lr, r4, r5
+ ldmib sp, {r4, r5}
+ ldr r3, [sp, #44] @ 4-byte Reload
+ ldr r2, [sp] @ 4-byte Reload
+ adcs r7, r6, r7
+ adcs r6, r8, r10
+ adcs r4, r4, r5
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r9, r3, r2
+ ldr r3, [sp, #40] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ mul r0, r9, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ umull r3, r2, r0, r1
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [sp, #112] @ 4-byte Reload
+ umull r7, r1, r0, r2
+ mov r2, r3
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [sp, #96] @ 4-byte Reload
+ mov r5, r1
+ umlal r5, r2, r0, r6
+ umull r10, r4, r0, r7
+ ldr r7, [sp, #92] @ 4-byte Reload
+ str r4, [sp, #8] @ 4-byte Spill
+ umull r12, r8, r0, r7
+ ldr r7, [sp, #120] @ 4-byte Reload
+ umull lr, r4, r0, r7
+ umull r11, r7, r0, r6
+ ldr r6, [sp, #8] @ 4-byte Reload
+ adds r1, r1, r11
+ ldr r11, [sp, #76] @ 4-byte Reload
+ adcs r1, r7, r3
+ umull r1, r3, r0, r11
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ adcs r1, r3, lr
+ adcs r3, r4, r12
+ ldr r4, [sp, #16] @ 4-byte Reload
+ adcs r7, r8, r10
+ ldr r10, [sp, #52] @ 4-byte Reload
+ ldr r8, [sp, #64] @ 4-byte Reload
+ adc r6, r6, #0
+ adds r4, r4, r9
+ ldr r9, [sp, #72] @ 4-byte Reload
+ ldr r4, [sp, #48] @ 4-byte Reload
+ adcs r5, r5, r4
+ str r5, [sp, #48] @ 4-byte Spill
+ ldr r5, [sp, #44] @ 4-byte Reload
+ adcs r2, r2, r5
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ umull r4, r5, r10, r7
+ adcs r0, r6, r0
+ str r4, [sp, #16] @ 4-byte Spill
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ umull r1, r6, r10, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ umull r2, r3, r10, r0
+ adds r2, r5, r2
+ adcs r2, r3, r1
+ umull r2, r3, r10, r9
+ adcs r7, r6, r2
+ ldr r6, [sp, #100] @ 4-byte Reload
+ umull r2, r12, r10, r6
+ adcs r6, r3, r2
+ umull r3, lr, r10, r8
+ mov r2, r10
+ ldr r10, [sp, #88] @ 4-byte Reload
+ adcs r4, r12, r3
+ umlal r5, r1, r2, r0
+ umull r3, r12, r2, r10
+ mov r10, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r3, lr, r3
+ adc r12, r12, #0
+ adds lr, r0, r2
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ mul r0, lr, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ umull r5, r12, r0, r7
+ umull r3, r6, r0, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ umull r2, r4, r0, r1
+ str r2, [sp, #20] @ 4-byte Spill
+ mov r1, r4
+ mov r2, r3
+ adds r4, r4, r5
+ umlal r1, r2, r0, r7
+ ldr r7, [sp, #120] @ 4-byte Reload
+ adcs r3, r12, r3
+ umull r3, r12, r0, r11
+ adcs r11, r6, r3
+ ldr r3, [sp, #92] @ 4-byte Reload
+ umull r4, r5, r0, r7
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r12, r12, r4
+ umull r4, r6, r0, r3
+ adcs r4, r5, r4
+ umull r5, r3, r0, r7
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r0, r6, r5
+ ldr r5, [sp, #20] @ 4-byte Reload
+ adc r3, r3, #0
+ adds r6, r5, lr
+ adcs r1, r1, r7
+ ldr r7, [sp, #104] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r11, r1
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r12, r1
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r11, r4, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r0, [r0, #16]
+ umull lr, r6, r0, r8
+ umull r5, r3, r0, r10
+ umull r8, r2, r0, r1
+ umull r12, r4, r0, r9
+ adds r5, r2, r5
+ umull r1, r5, r0, r7
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adcs r3, r3, r1
+ umlal r2, r1, r0, r10
+ adcs r9, r5, r12
+ umull r5, r3, r0, r7
+ ldr r7, [sp, #108] @ 4-byte Reload
+ adcs r12, r4, r5
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs lr, r3, lr
+ umull r5, r3, r0, r4
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r5, r6, r5
+ adc r3, r3, #0
+ adds r4, r0, r8
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r11, r12
+ ldr r11, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ mul r1, r4, r11
+ adcs r0, r0, lr
+ umull lr, r12, r1, r7
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ umull r2, r6, r1, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mov r3, r2
+ umull r8, r5, r1, r0
+ mov r0, r5
+ adds r5, r5, lr
+ umlal r0, r3, r1, r7
+ ldr r7, [sp, #120] @ 4-byte Reload
+ adcs r2, r12, r2
+ umull r5, lr, r1, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ umull r2, r12, r1, r7
+ ldr r7, [sp, #92] @ 4-byte Reload
+ adcs r9, r6, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ adcs r12, r12, r5
+ umull r5, r6, r1, r7
+ adcs lr, lr, r5
+ umull r7, r5, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r1, r6, r7
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adc r5, r5, #0
+ adds r4, r8, r4
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r12, r0
+ mov r12, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r0, [r0, #20]
+ umull lr, r8, r0, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ umull r6, r3, r0, r12
+ umull r4, r5, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ umull r10, r2, r0, r1
+ adds r6, r2, r6
+ umull r1, r6, r0, r7
+ ldr r7, [sp, #88] @ 4-byte Reload
+ adcs r3, r3, r1
+ umlal r2, r1, r0, r12
+ ldr r3, [sp, #100] @ 4-byte Reload
+ adcs r9, r6, r4
+ umull r4, r6, r0, r3
+ adcs r4, r5, r4
+ adcs r3, r6, lr
+ umull r5, r6, r0, r7
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r7, [sp, #108] @ 4-byte Reload
+ adcs r5, r8, r5
+ adc r6, r6, #0
+ adds lr, r0, r10
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r10, r0, r2
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ mul r1, lr, r11
+ ldr r11, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r6
+ umull r6, r12, r1, r7
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ umull r3, r4, r1, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mov r2, r3
+ umull r8, r5, r1, r0
+ mov r0, r5
+ adds r5, r5, r6
+ umlal r0, r2, r1, r7
+ ldr r7, [sp, #120] @ 4-byte Reload
+ adcs r3, r12, r3
+ umull r5, r6, r1, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ umull r3, r12, r1, r7
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r9, r4, r3
+ ldr r3, [sp, #92] @ 4-byte Reload
+ adcs r12, r12, r5
+ umull r4, r5, r1, r3
+ adcs r4, r6, r4
+ umull r6, r3, r1, r7
+ adcs r1, r5, r6
+ adc r3, r3, #0
+ adds r6, r8, lr
+ adcs r0, r0, r10
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r12, r0
+ ldr r12, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r0, [r0, #24]
+ umull r3, r2, r0, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r2, [sp, #60] @ 4-byte Spill
+ str r3, [sp, #20] @ 4-byte Spill
+ umull r3, lr, r0, r12
+ umull r9, r2, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r2, [sp, #88] @ 4-byte Spill
+ umull r7, r8, r0, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ umull r5, r6, r0, r1
+ ldr r1, [sp, #104] @ 4-byte Reload
+ umull r2, r4, r0, r1
+ umull r10, r1, r0, r11
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r3, r1, r3
+ str r10, [sp, #104] @ 4-byte Spill
+ ldr r10, [sp, #96] @ 4-byte Reload
+ adcs r3, lr, r2
+ umlal r1, r2, r0, r12
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs lr, r4, r5
+ ldr r5, [sp, #20] @ 4-byte Reload
+ ldr r3, [sp, #88] @ 4-byte Reload
+ ldr r4, [sp, #60] @ 4-byte Reload
+ adcs r6, r6, r7
+ adcs r7, r8, r9
+ ldr r8, [sp, #108] @ 4-byte Reload
+ adcs r5, r3, r5
+ ldr r3, [sp, #104] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r9, r0, r3
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ ldr lr, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r1, r9, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ umull r2, r3, r1, r8
+ umull r4, r5, r1, r0
+ adds r2, r5, r2
+ umull r0, r2, r1, r7
+ ldr r7, [sp, #120] @ 4-byte Reload
+ adcs r3, r3, r0
+ umull r3, r12, r1, lr
+ adcs r6, r2, r3
+ umull r3, r2, r1, r7
+ adcs r12, r12, r3
+ umull r7, r3, r1, r11
+ adcs r2, r2, r7
+ str r2, [sp, #80] @ 4-byte Spill
+ umull r7, r2, r1, r10
+ adcs r3, r3, r7
+ mov r7, r8
+ umlal r5, r0, r1, r7
+ adc r1, r2, #0
+ adds r2, r4, r9
+ ldr r2, [sp, #104] @ 4-byte Reload
+ adcs r8, r5, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ ldr r5, [sp, #116] @ 4-byte Reload
+ adcs r9, r0, r2
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r2, [sp, #80] @ 4-byte Reload
+ adcs r4, r6, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r4, [sp, #88] @ 4-byte Spill
+ adcs r6, r12, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r6, [sp, #100] @ 4-byte Spill
+ adcs r12, r2, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r2, r3, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r2, [sp, #104] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ subs r1, r8, r1
+ sbcs r3, r9, r7
+ ldr r7, [sp, #120] @ 4-byte Reload
+ sbcs r5, r4, r5
+ sbcs r6, r6, lr
+ sbcs r4, r12, r7
+ sbcs r11, r2, r11
+ ldr r2, [sp, #84] @ 4-byte Reload
+ sbcs lr, r0, r10
+ sbc r7, r2, #0
+ ldr r2, [sp, #56] @ 4-byte Reload
+ ands r7, r7, #1
+ movne r1, r8
+ movne r3, r9
+ str r1, [r2]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r3, [r2, #4]
+ movne r5, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ cmp r7, #0
+ movne r4, r12
+ str r5, [r2, #8]
+ movne r6, r1
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r6, [r2, #12]
+ str r4, [r2, #16]
+ movne r11, r1
+ cmp r7, #0
+ movne lr, r0
+ str r11, [r2, #20]
+ str lr, [r2, #24]
+ add sp, sp, #124
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end98:
+ .size mcl_fp_mont7L, .Lfunc_end98-mcl_fp_mont7L
+ .cantunwind
+ .fnend
+
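+@ mcl_fp_montNF7L: variant of the 7-limb Montgomery multiplication above; the
+@ final correction is selected by the sign of the trial subtraction (asr/movlt)
+@ rather than by a carry bit, which appears to be mcl's "NF" convention.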
+ .globl mcl_fp_montNF7L
+ .align 2
+ .type mcl_fp_montNF7L,%function
+mcl_fp_montNF7L: @ @mcl_fp_montNF7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #104
+ sub sp, sp, #104
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, r2
+ str r2, [sp, #40] @ 4-byte Spill
+ ldm r0, {r4, r12}
+ ldr r6, [r1, #4]
+ ldr r2, [r0, #8]
+ ldr r7, [r1]
+ ldr r0, [r0, #12]
+ ldr r5, [r1, #8]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #12]
+ umull r9, r8, r6, r4
+ umull lr, r10, r7, r4
+ str r6, [sp, #52] @ 4-byte Spill
+ mov r11, r6
+ str r7, [sp, #96] @ 4-byte Spill
+ str r5, [sp, #80] @ 4-byte Spill
+ str r2, [sp] @ 4-byte Spill
+ adds r6, r10, r9
+ umull r6, r9, r5, r4
+ ldr r5, [r1, #20]
+ adcs r7, r8, r6
+ umlal r10, r6, r11, r4
+ umull r7, r8, r0, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r9, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ str r5, [sp, #44] @ 4-byte Spill
+ umull r7, r9, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ adcs r0, r8, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ umull r7, r0, r5, r4
+ adcs r5, r9, r7
+ ldr r7, [r3, #4]
+ str r5, [sp, #76] @ 4-byte Spill
+ ldr r5, [r1, #24]
+ str r7, [sp, #72] @ 4-byte Spill
+ umull r1, r9, r5, r4
+ str r5, [sp, #68] @ 4-byte Spill
+ ldr r5, [r3]
+ adcs r0, r0, r1
+ ldr r1, [r3, #-4]
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r9, #0
+ ldr r9, [r3, #8]
+ str r0, [sp, #24] @ 4-byte Spill
+ str r5, [sp, #56] @ 4-byte Spill
+ mul r0, lr, r1
+ str r1, [sp, #60] @ 4-byte Spill
+ umull r1, r2, r0, r5
+ str r9, [sp, #100] @ 4-byte Spill
+ adds r1, r1, lr
+ str r2, [sp, #20] @ 4-byte Spill
+ umull r1, lr, r0, r7
+ adcs r11, r1, r10
+ umull r5, r1, r0, r9
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [r3, #12]
+ adcs r9, r5, r6
+ str r1, [sp, #92] @ 4-byte Spill
+ umull r5, r10, r0, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r7, r5, r1
+ ldr r1, [r3, #16]
+ str r1, [sp, #88] @ 4-byte Spill
+ umull r5, r8, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r4, r5, r1
+ ldr r1, [r3, #20]
+ str r1, [sp, #84] @ 4-byte Spill
+ umull r5, r6, r0, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r5, r5, r1
+ ldr r1, [r3, #24]
+ umull r3, r2, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adcs r0, r3, r0
+ adc r3, r1, #0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adds r11, r11, r1
+ adcs r1, r9, lr
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r1, r7, r1
+ ldr r7, [sp, #80] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ adcs r1, r4, r10
+ str r1, [sp, #20] @ 4-byte Spill
+ adcs r1, r5, r8
+ ldr r5, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ adc r0, r3, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ umull r9, r0, r12, r1
+ umull r3, r4, r12, r2
+ adds r3, r0, r3
+ umull r1, r3, r12, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ adcs r4, r4, r1
+ umlal r0, r1, r12, r2
+ umull r4, r6, r12, r5
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs r10, r3, r4
+ umull r4, r3, r12, r5
+ adcs r8, r6, r4
+ umull r6, r4, r12, r7
+ ldr r7, [sp, #68] @ 4-byte Reload
+ adcs r5, r3, r6
+ umull r6, r3, r12, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r4, r4, r6
+ adc r2, r3, #0
+ adds r3, r9, r11
+ adcs r0, r0, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r7
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r6, r10, r7
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adcs r11, r8, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ ldr r8, [sp, #72] @ 4-byte Reload
+ adcs r7, r5, r7
+ ldr r5, [sp, #8] @ 4-byte Reload
+ str r7, [sp, #16] @ 4-byte Spill
+ adcs r7, r4, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ adc r2, r2, #0
+ str r7, [sp, #20] @ 4-byte Spill
+ str r2, [sp, #28] @ 4-byte Spill
+ mul r2, r3, r5
+ ldr r5, [sp, #56] @ 4-byte Reload
+ umull r4, r7, r2, r5
+ adds r3, r4, r3
+ str r7, [sp, #24] @ 4-byte Spill
+ umull r3, r7, r2, r8
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adcs lr, r3, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r7, [sp, #12] @ 4-byte Spill
+ umull r3, r7, r2, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r12, r3, r1
+ str r7, [sp, #8] @ 4-byte Spill
+ umull r3, r10, r2, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r3, r3, r6
+ umull r6, r9, r2, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r5, r6, r11
+ ldr r11, [sp, #76] @ 4-byte Reload
+ umull r6, r1, r2, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r6, r6, r0
+ umull r7, r0, r2, r11
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r2, r7, r2
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adc r7, r7, #0
+ adds r4, lr, r4
+ str r4, [sp, #28] @ 4-byte Spill
+ ldr r4, [sp, #12] @ 4-byte Reload
+ adcs r4, r12, r4
+ ldr r12, [sp, #52] @ 4-byte Reload
+ str r4, [sp, #24] @ 4-byte Spill
+ ldr r4, [sp, #8] @ 4-byte Reload
+ adcs r3, r3, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ str r3, [sp, #20] @ 4-byte Spill
+ adcs r3, r5, r10
+ ldr r5, [sp, #48] @ 4-byte Reload
+ str r3, [sp, #16] @ 4-byte Spill
+ adcs r3, r6, r9
+ ldr r9, [sp, #68] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [sp, #80] @ 4-byte Reload
+ adc r0, r7, r0
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp] @ 4-byte Reload
+ umull r2, r6, r0, r12
+ umull r11, lr, r0, r1
+ adds r2, lr, r2
+ umull r1, r2, r0, r3
+ adcs r6, r6, r1
+ umlal lr, r1, r0, r12
+ umull r6, r3, r0, r5
+ adcs r5, r2, r6
+ umull r6, r2, r0, r4
+ adcs r10, r3, r6
+ umull r6, r3, r0, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r4, r2, r6
+ umull r6, r2, r0, r9
+ ldr r9, [sp, #56] @ 4-byte Reload
+ adcs r3, r3, r6
+ ldr r6, [sp, #24] @ 4-byte Reload
+ adc r2, r2, #0
+ adds r7, r11, r7
+ adcs r0, lr, r6
+ ldr r6, [sp, #20] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adcs r6, r5, r6
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adcs r11, r10, r5
+ ldr r5, [sp, #8] @ 4-byte Reload
+ adcs r10, r4, r5
+ ldr r5, [sp, #4] @ 4-byte Reload
+ ldr r4, [sp, #92] @ 4-byte Reload
+ adcs r3, r3, r5
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [sp, #60] @ 4-byte Reload
+ adc r2, r2, #0
+ str r2, [sp, #24] @ 4-byte Spill
+ mul r2, r7, r3
+ umull r3, r5, r2, r9
+ adds r3, r3, r7
+ str r5, [sp, #20] @ 4-byte Spill
+ umull r3, r7, r2, r8
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adcs r8, r3, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ umull r3, lr, r2, r7
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adcs r1, r3, r1
+ umull r3, r12, r2, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r3, r3, r6
+ umull r6, r5, r2, r4
+ adcs r6, r6, r11
+ umull r4, r11, r2, r7
+ adcs r4, r4, r10
+ umull r7, r10, r2, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r2, r7, r0
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r7, r0, #0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adds r0, r8, r0
+ ldr r8, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ adcs r0, r3, lr
+ ldr r3, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ adcs r0, r6, r12
+ ldr r6, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ adcs r0, r4, r5
+ str r0, [sp, #12] @ 4-byte Spill
+ adcs r0, r2, r11
+ str r0, [sp, #8] @ 4-byte Spill
+ adc r0, r7, r10
+ ldr r7, [sp, #80] @ 4-byte Reload
+ ldr r10, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #4] @ 4-byte Spill
+ umull r4, r0, r6, r1
+ umull r11, r2, r6, r3
+ adds r4, r2, r4
+ umull r3, r4, r6, r7
+ adcs r0, r0, r3
+ umlal r2, r3, r6, r1
+ umull r0, r7, r6, r8
+ adcs r5, r4, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ umull r4, r1, r6, r0
+ mov r0, r6
+ adcs r4, r7, r4
+ umull r7, r12, r6, r10
+ ldr r6, [sp, #68] @ 4-byte Reload
+ adcs lr, r1, r7
+ umull r7, r1, r0, r6
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r7, r12, r7
+ adc r12, r1, #0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adds r0, r11, r0
+ adcs r2, r2, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r3, r3, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r6, r5, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r11, r4, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r1, lr, r1
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r1, r7, r1
+ str r1, [sp, #24] @ 4-byte Spill
+ adc r1, r12, #0
+ ldr r12, [sp, #76] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ mul r4, r0, r1
+ umull r7, r1, r4, r9
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adds r0, r7, r0
+ umull r0, r7, r4, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs lr, r0, r2
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [sp, #84] @ 4-byte Reload
+ umull r2, r0, r4, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ adcs r2, r2, r3
+ umull r3, r0, r4, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r3, r3, r6
+ umull r6, r5, r4, r1
+ adcs r6, r6, r11
+ umull r1, r11, r4, r7
+ umull r7, r9, r4, r12
+ ldr r12, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r0
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r4, r7, r0
+ ldr r7, [sp, #32] @ 4-byte Reload
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r7, r7, #0
+ adds r0, lr, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ adcs r0, r1, r5
+ str r0, [sp, #16] @ 4-byte Spill
+ adcs r0, r4, r11
+ str r0, [sp, #12] @ 4-byte Spill
+ adc r0, r7, r9
+ ldr r9, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r4, [r9, #16]
+ umull r11, r3, r4, r2
+ ldr r2, [sp, #80] @ 4-byte Reload
+ umull r0, r1, r4, r12
+ adds r0, r3, r0
+ umull r5, r0, r4, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r5
+ umlal r3, r5, r4, r12
+ umull r1, r7, r4, r8
+ adcs r8, r0, r1
+ umull r1, r0, r4, r2
+ adcs lr, r7, r1
+ umull r7, r1, r4, r10
+ adcs r2, r0, r7
+ umull r7, r0, r4, r6
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adcs r1, r1, r7
+ ldr r7, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r4, r11, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r3, r3, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adcs r5, r5, r7
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r7, r8, r7
+ adcs r11, lr, r6
+ ldr r6, [sp, #12] @ 4-byte Reload
+ adcs r10, r2, r6
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ mul r0, r4, r1
+ umull r1, r6, r0, r2
+ ldr r2, [sp, #72] @ 4-byte Reload
+ adds r1, r1, r4
+ str r6, [sp, #24] @ 4-byte Spill
+ ldr r4, [sp, #84] @ 4-byte Reload
+ umull r1, r6, r0, r2
+ adcs lr, r1, r3
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r6, [sp, #20] @ 4-byte Spill
+ umull r3, r2, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r3, r3, r5
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [sp, #24] @ 4-byte Reload
+ umull r5, r8, r0, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r5, r5, r7
+ umull r7, r12, r0, r1
+ adcs r6, r7, r11
+ ldr r11, [sp, #76] @ 4-byte Reload
+ umull r7, r1, r0, r4
+ adcs r7, r7, r10
+ umull r4, r10, r0, r11
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #28] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r2, lr, r2
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r2, r3, r2
+ ldr r3, [sp, #52] @ 4-byte Reload
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r11, r5, r2
+ adcs r2, r6, r8
+ ldr r6, [sp, #48] @ 4-byte Reload
+ ldr r8, [sp, #76] @ 4-byte Reload
+ str r2, [sp, #24] @ 4-byte Spill
+ adcs r2, r7, r12
+ ldr r7, [r9, #20]
+ adcs r0, r0, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r2, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r4, r10
+ str r0, [sp, #12] @ 4-byte Spill
+ umull r4, r0, r7, r3
+ umull r10, r2, r7, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adds r4, r2, r4
+ umull r5, r4, r7, r1
+ adcs r0, r0, r5
+ umlal r2, r5, r7, r3
+ ldr r3, [sp, #68] @ 4-byte Reload
+ umull r0, r1, r7, r6
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adcs lr, r4, r0
+ umull r4, r0, r7, r6
+ ldr r6, [sp, #44] @ 4-byte Reload
+ adcs r12, r1, r4
+ umull r4, r1, r7, r6
+ adcs r9, r0, r4
+ umull r4, r0, r7, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r4
+ adc r0, r0, #0
+ adds r4, r10, r3
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r2, r2, r3
+ ldr r3, [sp, #24] @ 4-byte Reload
+ adcs r5, r5, r11
+ adcs r7, lr, r3
+ ldr r3, [sp, #20] @ 4-byte Reload
+ adcs r11, r12, r3
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adcs r9, r9, r3
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adcs r1, r1, r3
+ ldr r3, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ mul r0, r4, r1
+ umull r1, r6, r0, r3
+ ldr r3, [sp, #72] @ 4-byte Reload
+ adds r1, r1, r4
+ str r6, [sp, #24] @ 4-byte Spill
+ ldr r4, [sp, #84] @ 4-byte Reload
+ umull r1, r6, r0, r3
+ ldr r3, [sp, #100] @ 4-byte Reload
+ adcs r12, r1, r2
+ str r6, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ umull r2, r10, r0, r3
+ ldr r3, [sp, #92] @ 4-byte Reload
+ adcs r2, r2, r5
+ umull r5, lr, r0, r3
+ ldr r3, [sp, #88] @ 4-byte Reload
+ adcs r5, r5, r7
+ umull r7, r6, r0, r3
+ adcs r7, r7, r11
+ umull r3, r11, r0, r4
+ adcs r3, r3, r9
+ umull r4, r9, r0, r8
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #28] @ 4-byte Reload
+ adc r4, r4, #0
+ adds r8, r12, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r1, [sp, #16] @ 4-byte Spill
+ adcs r1, r5, r10
+ ldr r5, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ adcs r1, r7, lr
+ ldr r7, [sp, #64] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ adcs r1, r3, r6
+ adcs r0, r0, r11
+ str r1, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r9, r4, r9
+ ldr r4, [r0, #24]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ umull r6, lr, r4, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ umull r12, r1, r4, r5
+ umull r11, r2, r4, r0
+ mov r0, r6
+ mov r3, r2
+ adds r2, r2, r12
+ adcs r1, r1, r6
+ ldr r6, [sp, #48] @ 4-byte Reload
+ umlal r3, r0, r4, r5
+ umull r1, r2, r4, r6
+ adcs r5, lr, r1
+ umull r6, r1, r4, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ adcs lr, r2, r6
+ umull r6, r2, r4, r7
+ ldr r7, [sp, #68] @ 4-byte Reload
+ adcs r12, r1, r6
+ umull r6, r1, r4, r7
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r2, r2, r6
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adc r1, r1, #0
+ adds r4, r11, r8
+ ldr r11, [sp, #88] @ 4-byte Reload
+ adcs r3, r3, r6
+ ldr r6, [sp, #32] @ 4-byte Reload
+ adcs r6, r0, r6
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r5, r5, r0
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r10, lr, r0
+ adcs r7, r12, r7
+ adcs r12, r2, r9
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adc lr, r1, #0
+ mul r1, r4, r2
+ umull r2, r8, r1, r7
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adds r2, r2, r4
+ umull r2, r9, r1, r7
+ ldr r7, [sp, #72] @ 4-byte Reload
+ umull r4, r0, r1, r7
+ ldr r7, [sp, #92] @ 4-byte Reload
+ adcs r3, r4, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ adcs r0, r2, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ umull r2, r0, r1, r7
+ str r0, [sp, #68] @ 4-byte Spill
+ adcs r0, r2, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ umull r5, r0, r1, r11
+ adcs r2, r5, r10
+ ldr r10, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r5, [sp, #76] @ 4-byte Reload
+ umull r6, r0, r1, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r6, r6, r0
+ umull r4, r0, r1, r5
+ adcs r1, r4, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adc r4, lr, #0
+ adds r8, r3, r8
+ ldr r3, [sp, #60] @ 4-byte Reload
+ adcs r0, r3, r0
+ ldr r3, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ adcs lr, r3, r9
+ ldr r3, [sp, #68] @ 4-byte Reload
+ adcs r12, r2, r3
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r3, r6, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r3, [sp, #96] @ 4-byte Spill
+ adcs r2, r1, r2
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adc r9, r4, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ subs r4, r8, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ sbcs r6, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ sbcs r1, lr, r1
+ sbcs r7, r12, r7
+ sbcs r11, r3, r11
+ ldr r3, [sp, #36] @ 4-byte Reload
+ sbcs r10, r2, r10
+ sbc r5, r9, r5
+ asr r0, r5, #31
+ cmp r0, #0
+ movlt r4, r8
+ movlt r1, lr
+ str r4, [r3]
+ ldr r4, [sp, #80] @ 4-byte Reload
+ movlt r6, r4
+ cmp r0, #0
+ str r6, [r3, #4]
+ str r1, [r3, #8]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ movlt r7, r12
+ movlt r10, r2
+ str r7, [r3, #12]
+ movlt r11, r1
+ cmp r0, #0
+ movlt r5, r9
+ str r11, [r3, #16]
+ str r10, [r3, #20]
+ str r5, [r3, #24]
+ add sp, sp, #104
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end99:
+ .size mcl_fp_montNF7L, .Lfunc_end99-mcl_fp_montNF7L
+ .cantunwind
+ .fnend
+
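+@ mcl_fp_montRed7L: Montgomery reduction of a double-width (14-limb) value at r1
+@ down to a 7-limb result at r0, using the constant at [r2, #-4] (presumably
+@ -p^{-1} mod 2^32) and a final conditional subtraction of the modulus at r2.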
+ .globl mcl_fp_montRed7L
+ .align 2
+ .type mcl_fp_montRed7L,%function
+mcl_fp_montRed7L: @ @mcl_fp_montRed7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #120
+ sub sp, sp, #120
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #4]
+ ldr r10, [r2, #-4]
+ ldr r4, [r1]
+ ldr r3, [r2]
+ ldr r7, [r2, #8]
+ ldr r5, [r2, #4]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #8]
+ str r4, [sp, #60] @ 4-byte Spill
+ str r7, [sp, #108] @ 4-byte Spill
+ str r3, [sp, #116] @ 4-byte Spill
+ str r5, [sp, #24] @ 4-byte Spill
+ str r10, [sp, #92] @ 4-byte Spill
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r1, #12]
+ str r0, [sp, #76] @ 4-byte Spill
+ mul r0, r4, r10
+ umull r4, r12, r0, r3
+ umull lr, r6, r0, r7
+ str r4, [sp, #52] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ str r6, [sp, #72] @ 4-byte Spill
+ mov r9, lr
+ mov r3, r12
+ umlal r3, r9, r0, r5
+ umull r7, r6, r0, r4
+ str r4, [sp, #104] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ str r7, [sp, #68] @ 4-byte Spill
+ str r6, [sp, #64] @ 4-byte Spill
+ umull r7, r6, r0, r4
+ str r4, [sp, #112] @ 4-byte Spill
+ ldr r4, [r2, #16]
+ ldr r2, [r2, #12]
+ str r7, [sp, #44] @ 4-byte Spill
+ str r6, [sp, #48] @ 4-byte Spill
+ str r4, [sp, #96] @ 4-byte Spill
+ umull r8, r7, r0, r4
+ str r2, [sp, #100] @ 4-byte Spill
+ umull r4, r6, r0, r2
+ umull r11, r2, r0, r5
+ adds r0, r12, r11
+ ldr r11, [r1, #36]
+ adcs r0, r2, lr
+ ldr r2, [sp, #48] @ 4-byte Reload
+ ldr lr, [r1, #28]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r5, r6, r8
+ ldr r8, [sp, #108] @ 4-byte Reload
+ ldr r6, [sp, #56] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adds r0, r0, r2
+ ldr r2, [r1, #24]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ ldr r3, [r1, #20]
+ mul r4, r0, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ ldr r10, [r1, #40]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ umull r12, r1, r4, r8
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r9
+ ldr r9, [sp, #96] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ umull r7, r1, r4, r6
+ str r7, [sp, #28] @ 4-byte Spill
+ mov r7, r12
+ adcs r0, r3, r0
+ ldr r3, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ mov r0, r1
+ umlal r0, r7, r4, r5
+ adcs r2, r2, r3
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r2, lr, r2
+ ldr lr, [sp, #100] @ 4-byte Reload
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [sp, #60] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #60] @ 4-byte Spill
+ adcs r2, r11, #0
+ mov r11, r5
+ str r2, [sp, #56] @ 4-byte Spill
+ adcs r2, r10, #0
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adcs r2, r2, #0
+ str r2, [sp, #40] @ 4-byte Spill
+ mov r2, #0
+ adc r2, r2, #0
+ str r2, [sp, #36] @ 4-byte Spill
+ umull r3, r2, r4, r5
+ ldr r5, [sp, #20] @ 4-byte Reload
+ adds r1, r1, r3
+ adcs r2, r2, r12
+ umull r1, r3, r4, r9
+ umull r2, r12, r4, lr
+ adcs r2, r5, r2
+ adcs r10, r12, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r2, [sp] @ 4-byte Spill
+ ldr r12, [sp, #92] @ 4-byte Reload
+ umull r5, r2, r4, r1
+ adcs r1, r3, r5
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ umull r5, r3, r4, r1
+ adcs r2, r2, r5
+ ldr r5, [sp] @ 4-byte Reload
+ str r2, [sp, #8] @ 4-byte Spill
+ adc r2, r3, #0
+ ldr r3, [sp, #28] @ 4-byte Reload
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [sp, #84] @ 4-byte Reload
+ adds r4, r3, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r4, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ umull r3, r0, r4, r8
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ umull r0, r2, r4, r6
+ ldr r6, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r10, r0
+ adcs r6, r7, r6
+ ldr r7, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ mov r0, r2
+ str r6, [sp, #76] @ 4-byte Spill
+ ldr r6, [sp, #64] @ 4-byte Reload
+ umlal r0, r5, r4, r11
+ adcs r6, r7, r6
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r6, [sp, #72] @ 4-byte Spill
+ ldr r6, [sp, #60] @ 4-byte Reload
+ adcs r6, r7, r6
+ umull r7, r8, r4, r1
+ str r6, [sp, #68] @ 4-byte Spill
+ ldr r6, [sp, #56] @ 4-byte Reload
+ adcs r6, r6, #0
+ str r6, [sp, #64] @ 4-byte Spill
+ ldr r6, [sp, #52] @ 4-byte Reload
+ adcs r6, r6, #0
+ str r6, [sp, #60] @ 4-byte Spill
+ ldr r6, [sp, #48] @ 4-byte Reload
+ adcs r6, r6, #0
+ str r6, [sp, #56] @ 4-byte Spill
+ ldr r6, [sp, #44] @ 4-byte Reload
+ adcs r6, r6, #0
+ str r6, [sp, #52] @ 4-byte Spill
+ ldr r6, [sp, #40] @ 4-byte Reload
+ adcs r6, r6, #0
+ str r6, [sp, #48] @ 4-byte Spill
+ ldr r6, [sp, #36] @ 4-byte Reload
+ adc r6, r6, #0
+ str r6, [sp, #44] @ 4-byte Spill
+ umull r6, r10, r4, r11
+ adds r1, r2, r6
+ adcs r2, r10, r3
+ umull r1, r6, r4, lr
+ ldr lr, [sp, #108] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r10, r2, r1
+ umull r2, r3, r4, r9
+ adcs r9, r6, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ umull r6, r1, r4, r2
+ adcs r3, r3, r6
+ adcs r1, r1, r7
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adc r8, r8, #0
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adds r7, r3, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ mul r7, r0, r12
+ str r0, [sp, #40] @ 4-byte Spill
+ umull r3, r0, r7, lr
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ umull r4, r1, r7, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r4, [sp, #36] @ 4-byte Spill
+ mov r4, r3
+ adcs r0, r5, r0
+ ldr r5, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #104] @ 4-byte Reload
+ adcs r5, r9, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ mov r0, r1
+ str r5, [sp, #80] @ 4-byte Spill
+ ldr r5, [sp, #72] @ 4-byte Reload
+ umlal r0, r4, r7, r11
+ adcs r5, r6, r5
+ ldr r6, [sp, #12] @ 4-byte Reload
+ str r5, [sp, #76] @ 4-byte Spill
+ ldr r5, [sp, #68] @ 4-byte Reload
+ adcs r5, r6, r5
+ str r5, [sp, #72] @ 4-byte Spill
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs r6, r8, r5
+ ldr r8, [sp, #100] @ 4-byte Reload
+ str r6, [sp, #68] @ 4-byte Spill
+ ldr r6, [sp, #60] @ 4-byte Reload
+ adcs r6, r6, #0
+ str r6, [sp, #64] @ 4-byte Spill
+ ldr r6, [sp, #56] @ 4-byte Reload
+ adcs r6, r6, #0
+ str r6, [sp, #60] @ 4-byte Spill
+ ldr r6, [sp, #52] @ 4-byte Reload
+ adcs r6, r6, #0
+ str r6, [sp, #56] @ 4-byte Spill
+ ldr r6, [sp, #48] @ 4-byte Reload
+ adcs r6, r6, #0
+ str r6, [sp, #52] @ 4-byte Spill
+ ldr r6, [sp, #44] @ 4-byte Reload
+ adc r6, r6, #0
+ str r6, [sp, #48] @ 4-byte Spill
+ umull r9, r6, r7, r10
+ str r6, [sp, #44] @ 4-byte Spill
+ umull r6, r5, r7, r11
+ adds r1, r1, r6
+ umull r6, r12, r7, r2
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r2, r5, r3
+ umull r2, r3, r7, r8
+ adcs r1, r1, r2
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ umull r5, r2, r7, r1
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r3, r3, r5
+ ldr r5, [sp, #116] @ 4-byte Reload
+ adcs r2, r2, r6
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [sp, #28] @ 4-byte Reload
+ str r2, [sp, #16] @ 4-byte Spill
+ adcs r2, r12, r9
+ ldr r9, [sp, #92] @ 4-byte Reload
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adc r2, r2, #0
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adds r6, r7, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r6, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ umull r7, r0, r6, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ umull r0, r2, r6, r5
+ mov r12, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r2
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ umlal r4, r12, r6, r11
+ adcs r0, r3, r0
+ ldr r3, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r3, r0
+ ldr r3, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ ldr r3, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r3, r0
+ ldr r3, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ umull r3, r0, r6, r10
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ umull lr, r0, r6, r3
+ str r0, [sp, #20] @ 4-byte Spill
+ umull r10, r0, r6, r11
+ adds r2, r2, r10
+ adcs r0, r0, r7
+ umull r2, r10, r6, r1
+ umull r0, r1, r6, r8
+ ldr r6, [sp, #32] @ 4-byte Reload
+ adcs r8, r6, r0
+ adcs r0, r1, r2
+ ldr r1, [sp, #20] @ 4-byte Reload
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r10, r10, lr
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc lr, r0, #0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adds r7, r2, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ mul r4, r0, r9
+ ldr r0, [sp, #108] @ 4-byte Reload
+ umull r7, r2, r4, r0
+ str r2, [sp, #40] @ 4-byte Spill
+ umull r2, r0, r4, r5
+ ldr r5, [sp, #84] @ 4-byte Reload
+ str r2, [sp, #44] @ 4-byte Spill
+ mov r6, r0
+ mov r2, r7
+ umlal r6, r2, r4, r11
+ adcs r5, r12, r5
+ ldr r12, [sp, #100] @ 4-byte Reload
+ str r5, [sp, #84] @ 4-byte Spill
+ ldr r5, [sp, #80] @ 4-byte Reload
+ adcs r5, r8, r5
+ ldr r8, [sp, #104] @ 4-byte Reload
+ str r5, [sp, #80] @ 4-byte Spill
+ ldr r5, [sp, #76] @ 4-byte Reload
+ adcs r5, r1, r5
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r5, [sp, #76] @ 4-byte Spill
+ ldr r5, [sp, #72] @ 4-byte Reload
+ adcs r5, r10, r5
+ str r5, [sp, #72] @ 4-byte Spill
+ ldr r5, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, lr, r1
+ ldr lr, [sp, #96] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, #0
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, #0
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #52] @ 4-byte Spill
+ umull r5, r1, r4, r8
+ str r5, [sp, #32] @ 4-byte Spill
+ str r1, [sp, #36] @ 4-byte Spill
+ umull r5, r1, r4, r3
+ str r5, [sp, #20] @ 4-byte Spill
+ umull r9, r5, r4, r11
+ str r1, [sp, #28] @ 4-byte Spill
+ adds r0, r0, r9
+ umull r3, r9, r4, lr
+ umull r0, r1, r4, r12
+ adcs r4, r5, r7
+ ldr r4, [sp, #40] @ 4-byte Reload
+ adcs r10, r4, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r4, [sp, #28] @ 4-byte Reload
+ adcs r1, r1, r3
+ adcs r3, r9, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r9, [sp, #112] @ 4-byte Reload
+ adcs r7, r4, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r4, [sp, #48] @ 4-byte Reload
+ adc r5, r0, #0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adds r4, r0, r4
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r4, r6, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r6, [sp, #108] @ 4-byte Reload
+ adcs r2, r2, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r2, [sp, #84] @ 4-byte Spill
+ adcs r0, r10, r0
+ mov r10, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ mul r0, r4, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ umull r2, r7, r0, r11
+ umull r4, r3, r0, r1
+ adds r2, r3, r2
+ str r4, [sp, #92] @ 4-byte Spill
+ umull r1, r2, r0, r6
+ adcs r4, r7, r1
+ umlal r3, r1, r0, r11
+ umull r4, r5, r0, r12
+ adcs r2, r2, r4
+ str r2, [sp, #52] @ 4-byte Spill
+ umull r4, r2, r0, lr
+ adcs r7, r5, r4
+ str r7, [sp, #48] @ 4-byte Spill
+ umull r7, r4, r0, r9
+ adcs r5, r2, r7
+ umull r7, r2, r0, r8
+ adcs r7, r4, r7
+ adc r0, r2, #0
+ ldr r2, [sp, #92] @ 4-byte Reload
+ adds r2, r2, r10
+ ldr r2, [sp, #84] @ 4-byte Reload
+ adcs r12, r3, r2
+ ldr r2, [sp, #80] @ 4-byte Reload
+ adcs lr, r1, r2
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r10, r2, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ ldr r2, [sp, #48] @ 4-byte Reload
+ adcs r4, r2, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r8, r5, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r8, [sp, #84] @ 4-byte Spill
+ adcs r2, r7, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r7, [sp, #100] @ 4-byte Reload
+ str r2, [sp, #92] @ 4-byte Spill
+ adcs r1, r0, r1
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r3, r0, #0
+ ldr r0, [sp, #116] @ 4-byte Reload
+ subs r0, r12, r0
+ sbcs r5, lr, r11
+ mov r11, r4
+ sbcs r6, r10, r6
+ sbcs r7, r4, r7
+ ldr r4, [sp, #96] @ 4-byte Reload
+ sbcs r4, r8, r4
+ sbcs r8, r2, r9
+ ldr r2, [sp, #104] @ 4-byte Reload
+ sbcs r9, r1, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ sbc r3, r3, #0
+ ands r3, r3, #1
+ movne r0, r12
+ movne r5, lr
+ movne r6, r10
+ cmp r3, #0
+ str r0, [r2]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ movne r7, r11
+ str r5, [r2, #4]
+ str r6, [r2, #8]
+ str r7, [r2, #12]
+ movne r4, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r4, [r2, #16]
+ movne r8, r0
+ cmp r3, #0
+ movne r9, r1
+ str r8, [r2, #20]
+ str r9, [r2, #24]
+ add sp, sp, #120
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end100:
+ .size mcl_fp_montRed7L, .Lfunc_end100-mcl_fp_montRed7L
+ .cantunwind
+ .fnend
+
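+@ mcl_fp_addPre7L: plain 7-limb addition with no modular reduction; the carry
+@ out of the top limb is returned in r0.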
+ .globl mcl_fp_addPre7L
+ .align 2
+ .type mcl_fp_addPre7L,%function
+mcl_fp_addPre7L: @ @mcl_fp_addPre7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #8
+ sub sp, sp, #8
+ ldr r3, [r1, #4]
+ ldr r9, [r1]
+ ldr r7, [r2]
+ ldr lr, [r1, #8]
+ ldr r10, [r1, #12]
+ ldr r11, [r1, #16]
+ ldr r8, [r1, #24]
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [r1, #20]
+ adds r7, r7, r9
+ str r3, [sp] @ 4-byte Spill
+ ldmib r2, {r1, r3, r4, r5, r12}
+ ldr r6, [sp, #4] @ 4-byte Reload
+ ldr r2, [r2, #24]
+ str r7, [r0]
+ adcs r1, r1, r6
+ ldr r6, [sp] @ 4-byte Reload
+ adcs r3, r3, lr
+ adcs r4, r4, r10
+ adcs r5, r5, r11
+ adcs r6, r12, r6
+ adcs r2, r2, r8
+ stmib r0, {r1, r3, r4, r5, r6}
+ str r2, [r0, #24]
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #8
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end101:
+ .size mcl_fp_addPre7L, .Lfunc_end101-mcl_fp_addPre7L
+ .cantunwind
+ .fnend
+
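+@ mcl_fp_subPre7L: plain 7-limb subtraction with no modular reduction; the
+@ borrow out of the top limb is returned in r0 as 0 or 1.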
+ .globl mcl_fp_subPre7L
+ .align 2
+ .type mcl_fp_subPre7L,%function
+mcl_fp_subPre7L: @ @mcl_fp_subPre7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #8
+ sub sp, sp, #8
+ ldr r3, [r2, #4]
+ ldr r9, [r2]
+ ldr r7, [r1]
+ ldr lr, [r2, #8]
+ ldr r10, [r2, #12]
+ ldr r11, [r2, #16]
+ ldr r8, [r2, #24]
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [r2, #20]
+ subs r7, r7, r9
+ str r3, [sp] @ 4-byte Spill
+ ldmib r1, {r2, r3, r4, r5, r12}
+ ldr r6, [sp, #4] @ 4-byte Reload
+ ldr r1, [r1, #24]
+ str r7, [r0]
+ sbcs r2, r2, r6
+ ldr r6, [sp] @ 4-byte Reload
+ sbcs r3, r3, lr
+ sbcs r4, r4, r10
+ sbcs r5, r5, r11
+ sbcs r6, r12, r6
+ sbcs r1, r1, r8
+ stmib r0, {r2, r3, r4, r5, r6}
+ str r1, [r0, #24]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #8
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end102:
+ .size mcl_fp_subPre7L, .Lfunc_end102-mcl_fp_subPre7L
+ .cantunwind
+ .fnend
+
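+@ mcl_fp_shr1_7L: logical right shift of a 7-limb value by one bit.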
+ .globl mcl_fp_shr1_7L
+ .align 2
+ .type mcl_fp_shr1_7L,%function
+mcl_fp_shr1_7L: @ @mcl_fp_shr1_7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r11, lr}
+ push {r4, r5, r6, r7, r11, lr}
+ ldr r3, [r1, #4]
+ ldr r12, [r1]
+ ldr lr, [r1, #12]
+ ldr r2, [r1, #8]
+ ldr r5, [r1, #20]
+ ldr r4, [r1, #16]
+ ldr r1, [r1, #24]
+ lsrs r6, r3, #1
+ lsr r3, r3, #1
+ rrx r12, r12
+ lsrs r6, lr, #1
+ orr r7, r3, r2, lsl #31
+ lsr r6, lr, #1
+ rrx r2, r2
+ lsrs r3, r5, #1
+ lsr r5, r5, #1
+ str r12, [r0]
+ str r7, [r0, #4]
+ orr r5, r5, r1, lsl #31
+ orr r6, r6, r4, lsl #31
+ rrx r3, r4
+ lsr r1, r1, #1
+ str r2, [r0, #8]
+ str r6, [r0, #12]
+ str r3, [r0, #16]
+ str r5, [r0, #20]
+ str r1, [r0, #24]
+ pop {r4, r5, r6, r7, r11, lr}
+ mov pc, lr
+.Lfunc_end103:
+ .size mcl_fp_shr1_7L, .Lfunc_end103-mcl_fp_shr1_7L
+ .cantunwind
+ .fnend
+
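+@ mcl_fp_add7L: modular addition; the raw 7-limb sum is stored first, then the
+@ modulus at r3 is subtracted and the reduced value overwrites it unless that
+@ subtraction (including the addition's carry) borrows.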
+ .globl mcl_fp_add7L
+ .align 2
+ .type mcl_fp_add7L,%function
+mcl_fp_add7L: @ @mcl_fp_add7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #12
+ sub sp, sp, #12
+ ldr r7, [r1, #8]
+ ldr r10, [r1]
+ ldr r9, [r1, #4]
+ ldr r11, [r1, #16]
+ ldr r8, [r1, #24]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r1, #12]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r1, #20]
+ ldm r2, {r1, r4, r5, r6, r12, lr}
+ ldr r2, [r2, #24]
+ adds r10, r1, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r4, r4, r9
+ str r10, [r0]
+ adcs r5, r5, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r6, r6, r1
+ mov r1, #0
+ adcs r9, r12, r11
+ adcs r7, lr, r7
+ stmib r0, {r4, r5, r6, r9}
+ adcs r2, r2, r8
+ str r7, [r0, #20]
+ adc r1, r1, #0
+ str r2, [r0, #24]
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [r3]
+ str r1, [sp] @ 4-byte Spill
+ ldmib r3, {r12, lr}
+ ldr r1, [r3, #20]
+ ldr r8, [r3, #12]
+ ldr r11, [r3, #16]
+ ldr r3, [r3, #24]
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [sp] @ 4-byte Reload
+ subs r10, r10, r1
+ sbcs r1, r4, r12
+ ldr r4, [sp, #4] @ 4-byte Reload
+ sbcs r5, r5, lr
+ sbcs r12, r6, r8
+ str r5, [sp] @ 4-byte Spill
+ sbcs lr, r9, r11
+ sbcs r4, r7, r4
+ sbcs r5, r2, r3
+ ldr r2, [sp, #8] @ 4-byte Reload
+ sbc r2, r2, #0
+ tst r2, #1
+ bne .LBB104_2
+@ BB#1: @ %nocarry
+ str r10, [r0]
+ str r1, [r0, #4]
+ ldr r1, [sp] @ 4-byte Reload
+ add r2, r0, #8
+ stm r2, {r1, r12, lr}
+ str r4, [r0, #20]
+ str r5, [r0, #24]
+.LBB104_2: @ %carry
+ add sp, sp, #12
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end104:
+ .size mcl_fp_add7L, .Lfunc_end104-mcl_fp_add7L
+ .cantunwind
+ .fnend
+
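+@ mcl_fp_addNF7L: modular addition variant that chooses between the raw sum and
+@ sum - p by the sign of the trial subtraction (movlt), matching the "NF"
+@ convention used above.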
+ .globl mcl_fp_addNF7L
+ .align 2
+ .type mcl_fp_addNF7L,%function
+mcl_fp_addNF7L: @ @mcl_fp_addNF7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #20
+ sub sp, sp, #20
+ ldm r1, {r6, r7}
+ ldr r11, [r1, #16]
+ ldr r9, [r1, #20]
+ ldr r8, [r1, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r1, #8]
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [r1, #12]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldm r2, {r1, r4, r5, r10, r12, lr}
+ ldr r2, [r2, #24]
+ adds r7, r1, r6
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r7, [sp, #4] @ 4-byte Spill
+ adcs r6, r4, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r6, [sp, #16] @ 4-byte Spill
+ adcs r5, r5, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r4, r10, r1
+ ldr r10, [r3, #8]
+ adcs r12, r12, r11
+ ldr r11, [r3, #16]
+ adcs lr, lr, r9
+ ldr r9, [r3, #20]
+ adc r1, r2, r8
+ ldr r2, [r3]
+ ldr r8, [r3, #12]
+ str r1, [sp, #12] @ 4-byte Spill
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r3, #4]
+ ldr r3, [r3, #24]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [sp] @ 4-byte Reload
+ subs r2, r7, r2
+ ldr r7, [sp, #8] @ 4-byte Reload
+ sbcs r7, r6, r7
+ sbcs r6, r5, r10
+ mov r10, r12
+ sbcs r8, r4, r8
+ sbcs r11, r12, r11
+ sbcs r12, lr, r9
+ ldr r9, [sp, #4] @ 4-byte Reload
+ sbc r3, r1, r3
+ asr r1, r3, #31
+ cmp r1, #0
+ movlt r2, r9
+ movlt r6, r5
+ str r2, [r0]
+ ldr r2, [sp, #16] @ 4-byte Reload
+ movlt r7, r2
+ cmp r1, #0
+ movlt r8, r4
+ movlt r11, r10
+ movlt r12, lr
+ cmp r1, #0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r7, [r0, #4]
+ str r6, [r0, #8]
+ str r8, [r0, #12]
+ str r11, [r0, #16]
+ str r12, [r0, #20]
+ movlt r3, r1
+ str r3, [r0, #24]
+ add sp, sp, #20
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end105:
+ .size mcl_fp_addNF7L, .Lfunc_end105-mcl_fp_addNF7L
+ .cantunwind
+ .fnend
+
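+@ mcl_fp_sub7L: 7-limb modular subtraction. [r1] minus [r2] is written to
+@ [r0]; when the subtraction borrows, the modulus at [r3] is added back in
+@ the %carry block.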
+ .globl mcl_fp_sub7L
+ .align 2
+ .type mcl_fp_sub7L,%function
+mcl_fp_sub7L: @ @mcl_fp_sub7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #12
+ sub sp, sp, #12
+ ldr r7, [r2, #8]
+ ldr r11, [r2]
+ ldr r9, [r2, #4]
+ ldr r8, [r2, #20]
+ ldr r10, [r2, #24]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r2, #12]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r2, #16]
+ str r7, [sp] @ 4-byte Spill
+ ldm r1, {r2, r4, r5, r6, r7, lr}
+ ldr r1, [r1, #24]
+ subs r12, r2, r11
+ ldr r2, [sp, #8] @ 4-byte Reload
+ sbcs r9, r4, r9
+ ldr r4, [sp, #4] @ 4-byte Reload
+ str r12, [r0]
+ str r9, [r0, #4]
+ sbcs r2, r5, r2
+ sbcs r11, r6, r4
+ ldr r4, [sp] @ 4-byte Reload
+ str r2, [r0, #8]
+ str r11, [r0, #12]
+ sbcs r4, r7, r4
+ sbcs r5, lr, r8
+ sbcs r6, r1, r10
+ add r1, r0, #16
+ stm r1, {r4, r5, r6}
+ mov r1, #0
+ sbc r1, r1, #0
+ tst r1, #1
+ beq .LBB106_2
+@ BB#1: @ %carry
+ ldr r1, [r3]
+ ldr r7, [r3, #4]
+ ldr lr, [r3, #12]
+ ldr r8, [r3, #16]
+ ldr r10, [r3, #20]
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [r3, #8]
+ ldr r3, [r3, #24]
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adds r1, r1, r12
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r7, r7, r9
+ adcs r2, r1, r2
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r12, lr, r11
+ adcs r4, r8, r4
+ adcs r5, r10, r5
+ adc r3, r3, r6
+ stm r0, {r1, r7}
+ str r2, [r0, #8]
+ str r12, [r0, #12]
+ str r4, [r0, #16]
+ str r5, [r0, #20]
+ str r3, [r0, #24]
+.LBB106_2: @ %nocarry
+ add sp, sp, #12
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end106:
+ .size mcl_fp_sub7L, .Lfunc_end106-mcl_fp_sub7L
+ .cantunwind
+ .fnend
+
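+@ mcl_fp_subNF7L: branch-free 7-limb modular subtraction; the raw difference
+@ and the difference plus the modulus at [r3] are both computed, and the
+@ sign of the top word picks the non-negative one with conditional moves.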
+ .globl mcl_fp_subNF7L
+ .align 2
+ .type mcl_fp_subNF7L,%function
+mcl_fp_subNF7L: @ @mcl_fp_subNF7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldm r2, {r5, lr}
+ ldr r7, [r2, #8]
+ ldr r11, [r2, #16]
+ ldr r10, [r2, #24]
+ add r9, r1, #12
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [r2, #12]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldm r1, {r2, r4, r12}
+ ldm r9, {r6, r8, r9}
+ ldr r7, [r1, #24]
+ ldr r1, [sp, #12] @ 4-byte Reload
+ subs r5, r2, r5
+ sbcs lr, r4, lr
+ sbcs r4, r12, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str lr, [sp] @ 4-byte Spill
+ sbcs r12, r6, r1
+ ldr r6, [r3, #4]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ sbcs r2, r8, r11
+ ldr r8, [r3, #12]
+ ldr r11, [r3, #16]
+ str r2, [sp, #12] @ 4-byte Spill
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [r3, #20]
+ sbcs r1, r9, r1
+ sbc r9, r7, r10
+ ldr r7, [r3]
+ ldr r10, [r3, #8]
+ ldr r3, [r3, #24]
+ str r6, [sp, #8] @ 4-byte Spill
+ ldr r6, [sp, #4] @ 4-byte Reload
+ adds r7, r5, r7
+ adcs r6, lr, r6
+ adcs lr, r4, r10
+ mov r10, r1
+ adcs r8, r12, r8
+ adcs r11, r2, r11
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r2, r1, r2
+ asr r1, r9, #31
+ adc r3, r9, r3
+ cmp r1, #0
+ movge r7, r5
+ ldr r5, [sp] @ 4-byte Reload
+ movge lr, r4
+ str r7, [r0]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ movge r6, r5
+ cmp r1, #0
+ movge r8, r12
+ movge r11, r7
+ movge r2, r10
+ cmp r1, #0
+ str r6, [r0, #4]
+ str lr, [r0, #8]
+ movge r3, r9
+ str r8, [r0, #12]
+ str r11, [r0, #16]
+ str r2, [r0, #20]
+ str r3, [r0, #24]
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end107:
+ .size mcl_fp_subNF7L, .Lfunc_end107-mcl_fp_subNF7L
+ .cantunwind
+ .fnend
+
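+@ mcl_fpDbl_add7L: addition of two 14-limb double-width values at [r1] and
+@ [r2]; the low 7 limbs of the sum are stored directly and the high 7 limbs
+@ are conditionally reduced by the modulus at [r3].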
+ .globl mcl_fpDbl_add7L
+ .align 2
+ .type mcl_fpDbl_add7L,%function
+mcl_fpDbl_add7L: @ @mcl_fpDbl_add7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #48
+ sub sp, sp, #48
+ ldm r1, {r12, lr}
+ ldr r8, [r1, #8]
+ ldr r10, [r1, #12]
+ ldmib r2, {r6, r7}
+ ldr r4, [r2, #16]
+ ldr r11, [r2]
+ ldr r5, [r2, #12]
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ adds r9, r11, r12
+ ldr r11, [r1, #44]
+ str r4, [sp, #12] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ str r4, [sp, #20] @ 4-byte Spill
+ ldr r4, [r2, #28]
+ str r4, [sp, #40] @ 4-byte Spill
+ ldr r4, [r2, #32]
+ str r4, [sp, #16] @ 4-byte Spill
+ ldr r4, [r2, #36]
+ str r4, [sp, #24] @ 4-byte Spill
+ ldr r4, [r2, #40]
+ str r4, [sp, #28] @ 4-byte Spill
+ ldr r4, [r2, #44]
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [r2, #48]
+ ldr r2, [r2, #52]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #36]
+ str r4, [sp, #36] @ 4-byte Spill
+ adcs r4, r6, lr
+ add lr, r1, #16
+ adcs r7, r7, r8
+ ldr r8, [r1, #52]
+ adcs r6, r5, r10
+ ldr r5, [r1, #32]
+ ldr r10, [r1, #48]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ str r9, [r0]
+ stmib r0, {r4, r7}
+ str r6, [r0, #12]
+ ldr r4, [sp, #8] @ 4-byte Reload
+ ldr r7, [sp] @ 4-byte Reload
+ adcs r1, r4, r1
+ ldr r4, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r2, r4, r2
+ str r2, [r0, #20]
+ adcs r1, r1, r12
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r2, r2, lr
+ str r2, [sp, #20] @ 4-byte Spill
+ adcs r2, r1, r5
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r2, [sp, #16] @ 4-byte Spill
+ adcs r5, r1, r7
+ ldr r1, [sp, #28] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ adcs r12, r1, r7
+ ldr r1, [sp, #32] @ 4-byte Reload
+ mov r7, #0
+ str r12, [sp, #40] @ 4-byte Spill
+ adcs lr, r1, r11
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r4, r1, r10
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r9, r1, r8
+ adc r1, r7, #0
+ str r1, [sp, #44] @ 4-byte Spill
+ ldm r3, {r1, r7, r11}
+ ldr r10, [r3, #12]
+ ldr r8, [r3, #16]
+ ldr r6, [r3, #20]
+ ldr r3, [r3, #24]
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [sp, #20] @ 4-byte Reload
+ subs r1, r3, r1
+ sbcs r7, r2, r7
+ sbcs r2, r5, r11
+ mov r11, lr
+ sbcs r10, r12, r10
+ sbcs r12, lr, r8
+ sbcs lr, r4, r6
+ ldr r6, [sp, #36] @ 4-byte Reload
+ sbcs r8, r9, r6
+ ldr r6, [sp, #44] @ 4-byte Reload
+ sbc r6, r6, #0
+ ands r6, r6, #1
+ movne r1, r3
+ movne r2, r5
+ str r1, [r0, #28]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ movne r7, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ cmp r6, #0
+ movne r12, r11
+ movne lr, r4
+ str r7, [r0, #32]
+ str r2, [r0, #36]
+ movne r10, r1
+ cmp r6, #0
+ movne r8, r9
+ str r10, [r0, #40]
+ str r12, [r0, #44]
+ str lr, [r0, #48]
+ str r8, [r0, #52]
+ add sp, sp, #48
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end108:
+ .size mcl_fpDbl_add7L, .Lfunc_end108-mcl_fpDbl_add7L
+ .cantunwind
+ .fnend
+
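+@ mcl_fpDbl_sub7L: subtraction of two 14-limb double-width values; the low
+@ 7 limbs of [r1] - [r2] are stored directly, and the modulus at [r3] is
+@ added back into the high 7 limbs when the subtraction borrows.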
+ .globl mcl_fpDbl_sub7L
+ .align 2
+ .type mcl_fpDbl_sub7L,%function
+mcl_fpDbl_sub7L: @ @mcl_fpDbl_sub7L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #68
+ sub sp, sp, #68
+ ldr r7, [r2, #32]
+ add r8, r1, #16
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldm r2, {r4, r7}
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #8]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #12]
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [r2, #16]
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ ldr r2, [r2, #20]
+ str r7, [sp, #36] @ 4-byte Spill
+ str r2, [sp, #24] @ 4-byte Spill
+ ldmib r1, {r2, r12, lr}
+ ldm r8, {r5, r6, r8}
+ ldr r7, [r1, #28]
+ ldr r11, [r1]
+ ldr r9, [r1, #32]
+ ldr r10, [r1, #44]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r1, #36]
+ subs r4, r11, r4
+ str r4, [r0]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r1, #40]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r1, #48]
+ ldr r1, [r1, #52]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [sp, #20] @ 4-byte Reload
+ sbcs r2, r2, r7
+ ldr r7, [sp, #16] @ 4-byte Reload
+ sbcs r12, r12, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ stmib r0, {r2, r12}
+ ldr r2, [sp, #32] @ 4-byte Reload
+ sbcs lr, lr, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str lr, [r0, #12]
+ sbcs r2, r5, r2
+ str r2, [r0, #16]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ sbcs r2, r6, r2
+ ldr r6, [sp, #8] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #40] @ 4-byte Reload
+ sbcs r2, r8, r2
+ mov r8, #0
+ str r2, [r0, #24]
+ ldr r2, [sp, #36] @ 4-byte Reload
+ sbcs lr, r7, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ sbcs r4, r9, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ ldr r9, [r3, #20]
+ str r4, [sp, #44] @ 4-byte Spill
+ sbcs r7, r7, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ sbcs r12, r6, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ ldr r6, [sp] @ 4-byte Reload
+ str r12, [sp, #52] @ 4-byte Spill
+ sbcs r11, r10, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ ldr r10, [r3, #12]
+ sbcs r6, r6, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ sbcs r5, r1, r2
+ ldr r2, [r3, #8]
+ sbc r1, r8, #0
+ ldr r8, [r3, #4]
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [r3]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ ldr r3, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adds r1, lr, r1
+ adcs r4, r4, r8
+ adcs r2, r7, r2
+ adcs r10, r12, r10
+ adcs r12, r11, r3
+ ldr r3, [sp, #56] @ 4-byte Reload
+ adcs r8, r6, r9
+ adc r9, r5, r3
+ ldr r3, [sp, #64] @ 4-byte Reload
+ ands r3, r3, #1
+ moveq r1, lr
+ moveq r2, r7
+ str r1, [r0, #28]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ moveq r4, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ cmp r3, #0
+ moveq r12, r11
+ moveq r8, r6
+ str r4, [r0, #32]
+ str r2, [r0, #36]
+ moveq r10, r1
+ cmp r3, #0
+ moveq r9, r5
+ str r10, [r0, #40]
+ str r12, [r0, #44]
+ str r8, [r0, #48]
+ str r9, [r0, #52]
+ add sp, sp, #68
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end109:
+ .size mcl_fpDbl_sub7L, .Lfunc_end109-mcl_fpDbl_sub7L
+ .cantunwind
+ .fnend
+
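+@ .LmulPv256x32 (local helper): multiply the 8-limb (256-bit) integer at
+@ [r1] by the 32-bit scalar in r2 and store the 9-limb product at [r0];
+@ shared by the 8-limb multiplication and Montgomery routines that follow.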
+ .align 2
+ .type .LmulPv256x32,%function
+.LmulPv256x32: @ @mulPv256x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r1, [r1, #28]
+ umull r3, r7, r1, r2
+ adcs r1, r6, r3
+ str r1, [r0, #28]
+ adc r1, r7, #0
+ str r1, [r0, #32]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end110:
+ .size .LmulPv256x32, .Lfunc_end110-.LmulPv256x32
+ .cantunwind
+ .fnend
+
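+@ mcl_fp_mulUnitPre8L: 8-limb by 32-bit scalar multiplication; a thin
+@ wrapper that runs .LmulPv256x32 into a stack buffer and copies the 9-limb
+@ result out to [r0].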
+ .globl mcl_fp_mulUnitPre8L
+ .align 2
+ .type mcl_fp_mulUnitPre8L,%function
+mcl_fp_mulUnitPre8L: @ @mcl_fp_mulUnitPre8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r11, lr}
+ push {r4, r5, r6, r7, r11, lr}
+ .pad #40
+ sub sp, sp, #40
+ mov r4, r0
+ mov r0, sp
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #32]
+ add lr, sp, #16
+ ldr r12, [sp, #28]
+ ldm lr, {r1, r3, lr}
+ ldm sp, {r2, r5, r6, r7}
+ str r0, [r4, #32]
+ add r0, r4, #16
+ stm r4, {r2, r5, r6, r7}
+ stm r0, {r1, r3, lr}
+ str r12, [r4, #28]
+ add sp, sp, #40
+ pop {r4, r5, r6, r7, r11, lr}
+ mov pc, lr
+.Lfunc_end111:
+ .size mcl_fp_mulUnitPre8L, .Lfunc_end111-mcl_fp_mulUnitPre8L
+ .cantunwind
+ .fnend
+
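+@ mcl_fpDbl_mulPre8L: full 8x8-limb multiplication producing a 16-limb
+@ product at [r0]. Effectively one level of Karatsuba-style splitting: three
+@ mcl_fpDbl_mulPre4L calls on the 4-limb halves, with the middle product
+@ recombined through the carry/select fix-up code below.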
+ .globl mcl_fpDbl_mulPre8L
+ .align 2
+ .type mcl_fpDbl_mulPre8L,%function
+mcl_fpDbl_mulPre8L: @ @mcl_fpDbl_mulPre8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #124
+ sub sp, sp, #124
+ mov r6, r2
+ mov r5, r1
+ mov r4, r0
+ bl mcl_fpDbl_mulPre4L(PLT)
+ add r0, r4, #32
+ add r1, r5, #16
+ add r2, r6, #16
+ bl mcl_fpDbl_mulPre4L(PLT)
+ ldm r6, {r12, lr}
+ ldr r7, [r6, #16]
+ ldr r9, [r6, #8]
+ ldr r3, [r6, #12]
+ add r6, r6, #20
+ mov r8, #0
+ ldm r6, {r0, r1, r6}
+ adds r2, r12, r7
+ adcs r0, lr, r0
+ str r2, [sp, #56] @ 4-byte Spill
+ adcs r1, r9, r1
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r9, [r5]
+ str r1, [sp, #44] @ 4-byte Spill
+ adcs r1, r3, r6
+ str r1, [sp, #48] @ 4-byte Spill
+ adc r6, r8, #0
+ ldmib r5, {r8, r10, r12}
+ ldr r7, [r5, #16]
+ ldr r3, [r5, #20]
+ ldr lr, [r5, #24]
+ ldr r11, [r5, #28]
+ str r2, [sp, #60]
+ str r0, [sp, #64]
+ mov r0, #0
+ add r2, sp, #60
+ adds r5, r9, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ adcs r8, r8, r3
+ str r5, [sp, #76]
+ adcs r10, r10, lr
+ str r8, [sp, #80]
+ adcs r9, r12, r11
+ str r10, [sp, #84]
+ str r7, [sp, #68]
+ str r1, [sp, #72]
+ adc r11, r0, #0
+ add r0, sp, #92
+ add r1, sp, #76
+ str r9, [sp, #88]
+ bl mcl_fpDbl_mulPre4L(PLT)
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r1, [sp, #52] @ 4-byte Reload
+ cmp r6, #0
+ ldr r3, [sp, #48] @ 4-byte Reload
+ and r12, r6, r11
+ ldr lr, [sp, #120]
+ moveq r5, r6
+ moveq r9, r6
+ moveq r10, r6
+ moveq r8, r6
+ ldr r6, [sp, #116]
+ adds r0, r5, r0
+ adcs r1, r8, r1
+ adcs r2, r10, r7
+ mov r7, #0
+ adcs r3, r9, r3
+ adc r7, r7, #0
+ cmp r11, #0
+ moveq r0, r5
+ ldr r5, [sp, #108]
+ moveq r2, r10
+ moveq r3, r9
+ moveq r7, r11
+ moveq r1, r8
+ adds r8, r0, r5
+ ldr r5, [sp, #112]
+ adcs r10, r1, r5
+ adcs r9, r2, r6
+ ldr r6, [r4]
+ ldmib r4, {r5, r11}
+ ldr r2, [sp, #92]
+ adcs lr, r3, lr
+ add r3, sp, #96
+ adc r12, r7, r12
+ ldr r7, [r4, #12]
+ ldm r3, {r0, r1, r3}
+ subs r2, r2, r6
+ str r2, [sp, #52] @ 4-byte Spill
+ sbcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ sbcs r0, r1, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ sbcs r0, r3, r7
+ ldr r7, [r4, #20]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r4, #16]
+ str r0, [sp, #56] @ 4-byte Spill
+ sbcs r0, r8, r0
+ ldr r8, [r4, #28]
+ str r0, [sp, #28] @ 4-byte Spill
+ sbcs r0, r10, r7
+ ldr r10, [r4, #24]
+ str r0, [sp, #24] @ 4-byte Spill
+ sbcs r0, r9, r10
+ str r0, [sp, #20] @ 4-byte Spill
+ sbcs r0, lr, r8
+ add lr, r4, #32
+ str r0, [sp, #16] @ 4-byte Spill
+ sbc r0, r12, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm lr, {r5, r9, lr}
+ ldr r6, [sp, #52] @ 4-byte Reload
+ ldr r12, [r4, #44]
+ ldr r2, [r4, #48]
+ ldr r0, [r4, #52]
+ ldr r1, [r4, #56]
+ ldr r3, [r4, #60]
+ subs r6, r6, r5
+ str r1, [sp, #36] @ 4-byte Spill
+ str r3, [sp, #32] @ 4-byte Spill
+ str r6, [sp] @ 4-byte Spill
+ ldr r6, [sp, #48] @ 4-byte Reload
+ sbcs r11, r6, r9
+ ldr r6, [sp, #44] @ 4-byte Reload
+ sbcs r6, r6, lr
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [sp, #40] @ 4-byte Reload
+ sbcs r6, r6, r12
+ str r6, [sp, #8] @ 4-byte Spill
+ ldr r6, [sp, #28] @ 4-byte Reload
+ sbcs r6, r6, r2
+ str r6, [sp, #28] @ 4-byte Spill
+ ldr r6, [sp, #24] @ 4-byte Reload
+ sbcs r6, r6, r0
+ str r6, [sp, #40] @ 4-byte Spill
+ mov r6, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ sbcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ sbc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adds r3, r0, r1
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r7, r7, r11
+ str r3, [r4, #16]
+ str r7, [r4, #20]
+ adcs r3, r10, r0
+ ldr r0, [sp, #8] @ 4-byte Reload
+ str r3, [r4, #24]
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r1, r8, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ str r1, [r4, #28]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [r4, #32]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r1, r9, r1
+ str r1, [r4, #36]
+ adcs r0, lr, r0
+ str r0, [r4, #40]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r12, r0
+ add r12, r4, #48
+ str r0, [r4, #44]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adcs r1, r6, #0
+ adcs r2, r2, #0
+ adc r3, r3, #0
+ stm r12, {r0, r1, r2, r3}
+ add sp, sp, #124
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end112:
+ .size mcl_fpDbl_mulPre8L, .Lfunc_end112-mcl_fpDbl_mulPre8L
+ .cantunwind
+ .fnend
+
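+@ mcl_fpDbl_sqrPre8L: 8-limb squaring producing a 16-limb result at [r0];
+@ same split-and-recombine structure as mcl_fpDbl_mulPre8L, reusing
+@ mcl_fpDbl_mulPre4L with both operands taken from [r1].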
+ .globl mcl_fpDbl_sqrPre8L
+ .align 2
+ .type mcl_fpDbl_sqrPre8L,%function
+mcl_fpDbl_sqrPre8L: @ @mcl_fpDbl_sqrPre8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #124
+ sub sp, sp, #124
+ mov r5, r1
+ mov r4, r0
+ mov r2, r5
+ bl mcl_fpDbl_mulPre4L(PLT)
+ add r1, r5, #16
+ add r0, r4, #32
+ mov r2, r1
+ bl mcl_fpDbl_mulPre4L(PLT)
+ ldm r5, {r0, r8, lr}
+ ldr r3, [r5, #16]
+ ldr r2, [r5, #20]
+ ldr r6, [r5, #24]
+ ldr r12, [r5, #12]
+ ldr r1, [r5, #28]
+ adds r9, r0, r3
+ add r0, sp, #64
+ adcs r5, r8, r2
+ str r9, [sp, #76]
+ str r9, [sp, #60]
+ add r2, sp, #60
+ adcs r6, lr, r6
+ str r5, [sp, #80]
+ adcs r7, r12, r1
+ str r6, [sp, #84]
+ add r1, sp, #76
+ str r7, [sp, #88]
+ stm r0, {r5, r6, r7}
+ mov r0, #0
+ adc r8, r0, #0
+ add r0, sp, #92
+ bl mcl_fpDbl_mulPre4L(PLT)
+ adds r12, r9, r9
+ adcs lr, r5, r5
+ adcs r9, r6, r6
+ add r6, sp, #112
+ ldm r6, {r0, r5, r6}
+ ldr r1, [sp, #108]
+ adc r10, r7, r7
+ adds r2, r1, r12
+ adcs r3, r0, lr
+ adcs r12, r5, r9
+ adcs lr, r6, r10
+ adc r7, r8, r7, lsr #31
+ cmp r8, #0
+ moveq lr, r6
+ add r6, sp, #92
+ moveq r7, r8
+ moveq r12, r5
+ moveq r3, r0
+ moveq r2, r1
+ ldm r4, {r8, r9, r10, r11}
+ ldm r6, {r0, r1, r5, r6}
+ subs r0, r0, r8
+ ldr r8, [r4, #20]
+ str r0, [sp, #52] @ 4-byte Spill
+ sbcs r0, r1, r9
+ ldr r9, [r4, #24]
+ str r0, [sp, #48] @ 4-byte Spill
+ sbcs r0, r5, r10
+ ldr r10, [r4, #28]
+ str r0, [sp, #44] @ 4-byte Spill
+ sbcs r0, r6, r11
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r4, #16]
+ str r0, [sp, #56] @ 4-byte Spill
+ sbcs r0, r2, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ sbcs r0, r3, r8
+ str r0, [sp, #24] @ 4-byte Spill
+ sbcs r0, r12, r9
+ str r0, [sp, #20] @ 4-byte Spill
+ sbcs r0, lr, r10
+ add lr, r4, #32
+ str r0, [sp, #16] @ 4-byte Spill
+ sbc r0, r7, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm lr, {r5, r7, lr}
+ ldr r6, [sp, #52] @ 4-byte Reload
+ ldr r12, [r4, #44]
+ ldr r2, [r4, #48]
+ ldr r0, [r4, #52]
+ ldr r1, [r4, #56]
+ ldr r3, [r4, #60]
+ subs r6, r6, r5
+ str r1, [sp, #36] @ 4-byte Spill
+ str r3, [sp, #32] @ 4-byte Spill
+ str r6, [sp] @ 4-byte Spill
+ ldr r6, [sp, #48] @ 4-byte Reload
+ sbcs r11, r6, r7
+ ldr r6, [sp, #44] @ 4-byte Reload
+ sbcs r6, r6, lr
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [sp, #40] @ 4-byte Reload
+ sbcs r6, r6, r12
+ str r6, [sp, #8] @ 4-byte Spill
+ ldr r6, [sp, #28] @ 4-byte Reload
+ sbcs r6, r6, r2
+ str r6, [sp, #28] @ 4-byte Spill
+ ldr r6, [sp, #24] @ 4-byte Reload
+ sbcs r6, r6, r0
+ str r6, [sp, #40] @ 4-byte Spill
+ mov r6, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ sbcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ sbc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adds r3, r1, r0
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r1, r11, r8
+ str r3, [r4, #16]
+ str r1, [r4, #20]
+ adcs r3, r0, r9
+ ldr r0, [sp, #8] @ 4-byte Reload
+ str r3, [r4, #24]
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r1, r0, r10
+ ldr r0, [sp, #28] @ 4-byte Reload
+ str r1, [r4, #28]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [r4, #32]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [r4, #36]
+ adcs r0, r0, lr
+ str r0, [r4, #40]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ add r12, r4, #48
+ str r0, [r4, #44]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adcs r1, r6, #0
+ adcs r2, r2, #0
+ adc r3, r3, #0
+ stm r12, {r0, r1, r2, r3}
+ add sp, sp, #124
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end113:
+ .size mcl_fpDbl_sqrPre8L, .Lfunc_end113-mcl_fpDbl_sqrPre8L
+ .cantunwind
+ .fnend
+
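+@ mcl_fp_mont8L: 8-limb Montgomery multiplication with the modulus p at [r3]
+@ and -p^-1 mod 2^32 loaded from [r3, #-4]. Word-serial loop built on
+@ .LmulPv256x32 (one multiply-by-y[i] pass and one reduction pass per input
+@ word), finished by a conditional subtraction of p.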
+ .globl mcl_fp_mont8L
+ .align 2
+ .type mcl_fp_mont8L,%function
+mcl_fp_mont8L: @ @mcl_fp_mont8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #724
+ sub sp, sp, #724
+ mov r7, r2
+ ldr r5, [r3, #-4]
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #680
+ str r3, [sp, #64] @ 4-byte Spill
+ str r1, [sp, #68] @ 4-byte Spill
+ mov r4, r3
+ mov r11, r1
+ ldr r2, [r7]
+ str r7, [sp, #76] @ 4-byte Spill
+ str r5, [sp, #72] @ 4-byte Spill
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #684]
+ ldr r9, [sp, #680]
+ mov r1, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #688]
+ mul r2, r9, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #712]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #640
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #672]
+ add r10, sp, #644
+ ldr r4, [sp, #656]
+ ldr r6, [sp, #640]
+ mov r1, r11
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #664]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #660]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r5, r8, r10}
+ ldr r2, [r7, #4]
+ add r0, sp, #600
+ bl .LmulPv256x32(PLT)
+ adds r0, r6, r9
+ ldr r2, [sp, #12] @ 4-byte Reload
+ mov r1, #0
+ add r12, sp, #604
+ ldr r9, [sp, #628]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #632]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r10, r10, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #600]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r11, r2, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r7, r2, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r1, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r12, {r0, r1, r2, r3, r6, r12}
+ ldr lr, [sp, #48] @ 4-byte Reload
+ ldr r5, [sp, #44] @ 4-byte Reload
+ adds r4, lr, r4
+ adcs r0, r5, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ adcs r0, r10, r1
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r11, r6
+ ldr r6, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ mov r1, r6
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #560
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #592]
+ ldr r5, [sp, #76] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r7, [sp, #576]
+ ldr r10, [sp, #560]
+ ldr r11, [sp, #564]
+ ldr r8, [sp, #568]
+ ldr r9, [sp, #572]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #588]
+ ldr r2, [r5, #8]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #520
+ bl .LmulPv256x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #520
+ ldr r4, [sp, #544]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r8, [sp, #552]
+ adcs r11, r0, r9
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r9, [sp, #548]
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adds r7, r7, r0
+ adcs r0, r10, r1
+ mov r1, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ adcs r0, r11, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #480
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #512]
+ ldr r2, [r5, #12]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #500]
+ ldr r6, [sp, #496]
+ ldr r10, [sp, #480]
+ ldr r11, [sp, #484]
+ ldr r8, [sp, #488]
+ ldr r9, [sp, #492]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #504]
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #440
+ bl .LmulPv256x32(PLT)
+ adds r0, r7, r10
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #440
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r11
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r8, [sp, #472]
+ adcs r11, r0, r9
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r9, [sp, #468]
+ adcs r0, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #464]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r5, r0
+ adcs r0, r10, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ adcs r0, r11, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r6, r4
+ ldr r6, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ mul r2, r7, r6
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #400
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #432]
+ ldr r5, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #416]
+ ldr r10, [sp, #400]
+ ldr r11, [sp, #404]
+ ldr r8, [sp, #408]
+ ldr r9, [sp, #412]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #428]
+ mov r1, r5
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #424]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #420]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, sp, #360
+ bl .LmulPv256x32(PLT)
+ adds r0, r7, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #360
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r7, r0, r11
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r8, [sp, #392]
+ adcs r11, r0, r9
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r9, [sp, #388]
+ adcs r0, r0, r4
+ ldr r4, [sp, #384]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r7, r0
+ adcs r0, r10, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ adcs r0, r11, r2
+ mul r2, r7, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #320
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #352]
+ ldr r6, [sp, #340]
+ ldr r4, [sp, #336]
+ ldr r10, [sp, #320]
+ ldr r11, [sp, #324]
+ ldr r8, [sp, #328]
+ ldr r9, [sp, #332]
+ mov r1, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #348]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #344]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #280
+ bl .LmulPv256x32(PLT)
+ adds r0, r7, r10
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #280
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r11
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r8, [sp, #312]
+ adcs r11, r0, r9
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r9, [sp, #308]
+ adcs r0, r0, r4
+ ldr r4, [sp, #304]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r5, r0
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ adcs r0, r11, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r6, r4
+ ldr r6, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ mul r2, r7, r6
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #240
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #272]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #256]
+ ldr r10, [sp, #240]
+ ldr r11, [sp, #244]
+ ldr r8, [sp, #248]
+ ldr r9, [sp, #252]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #268]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #264]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #200
+ bl .LmulPv256x32(PLT)
+ adds r0, r7, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #200
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r7, r0, r11
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r8, [sp, #232]
+ adcs r11, r0, r9
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r9, [sp, #228]
+ adcs r0, r0, r4
+ ldr r4, [sp, #224]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r7, r0
+ adcs r0, r10, r1
+ mov r1, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ adcs r0, r11, r2
+ mul r2, r7, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #160
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #192]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r6, [sp, #184]
+ ldr r4, [sp, #180]
+ ldr r5, [sp, #176]
+ ldr r10, [sp, #160]
+ ldr r11, [sp, #164]
+ ldr r8, [sp, #168]
+ ldr r9, [sp, #172]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #120
+ bl .LmulPv256x32(PLT)
+ adds r0, r7, r10
+ ldr r1, [sp, #52] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ ldr r12, [sp, #124]
+ ldr r3, [sp, #128]
+ add lr, sp, #136
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ adcs r8, r1, r8
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r9, r1, r9
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r11, r1, r4
+ ldr r1, [sp, #36] @ 4-byte Reload
+ ldr r4, [sp, #132]
+ adcs r1, r1, r6
+ ldr r6, [sp, #152]
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r10, r1, r2
+ ldr r1, [sp, #28] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #120]
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adc r1, r1, #0
+ adds r5, r0, r2
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r8, r8, r12
+ str r1, [sp, #52] @ 4-byte Spill
+ adcs r3, r9, r3
+ mul r7, r5, r0
+ ldm lr, {r0, r1, r2, lr}
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [sp, #76] @ 4-byte Reload
+ adcs r3, r3, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r9, r11, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r3, [sp, #44] @ 4-byte Spill
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ adcs r0, r10, r2
+ mov r2, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r10, r0, r6
+ mov r0, #0
+ adc r11, r0, #0
+ add r0, sp, #80
+ bl .LmulPv256x32(PLT)
+ add r3, sp, #80
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r5, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs lr, r8, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str lr, [sp, #40] @ 4-byte Spill
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r7, r0, r3
+ ldr r0, [sp, #96]
+ str r7, [sp, #52] @ 4-byte Spill
+ adcs r9, r9, r0
+ ldr r0, [sp, #100]
+ adcs r12, r1, r0
+ ldr r0, [sp, #104]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r12, [sp, #68] @ 4-byte Spill
+ adcs r8, r1, r0
+ ldr r0, [sp, #108]
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r8, [sp, #72] @ 4-byte Spill
+ adcs r6, r1, r0
+ ldr r0, [sp, #112]
+ adcs r5, r10, r0
+ adc r0, r11, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldm r4, {r1, r2, r3, r11}
+ ldr r0, [r4, #16]
+ ldr r10, [r4, #24]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r4, #20]
+ subs r1, lr, r1
+ ldr lr, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r4, #28]
+ sbcs r2, lr, r2
+ ldr r4, [sp, #48] @ 4-byte Reload
+ sbcs r3, r7, r3
+ sbcs r7, r9, r11
+ mov r11, r6
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ sbcs r0, r12, r0
+ sbcs r12, r8, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ sbcs r8, r6, r10
+ mov r10, r5
+ sbcs r4, r5, r4
+ ldr r5, [sp, #76] @ 4-byte Reload
+ sbc r6, r5, #0
+ ldr r5, [sp, #40] @ 4-byte Reload
+ ands r6, r6, #1
+ movne r2, lr
+ movne r1, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ str r1, [r5]
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r2, [r5, #4]
+ movne r3, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ cmp r6, #0
+ movne r7, r9
+ str r3, [r5, #8]
+ str r7, [r5, #12]
+ movne r0, r1
+ str r0, [r5, #16]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ movne r12, r0
+ cmp r6, #0
+ movne r8, r11
+ movne r4, r10
+ str r12, [r5, #20]
+ str r8, [r5, #24]
+ str r4, [r5, #28]
+ add sp, sp, #724
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end114:
+ .size mcl_fp_mont8L, .Lfunc_end114-mcl_fp_mont8L
+ .cantunwind
+ .fnend
+
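+@ mcl_fp_montNF8L: Montgomery multiplication, "NF" variant (apparently the
+@ form mcl selects when the modulus leaves a spare top bit, so no extra
+@ carry word needs to be tracked); same word-serial structure as
+@ mcl_fp_mont8L, with the final correction chosen by a signed comparison
+@ (movlt) instead of a carry test.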
+ .globl mcl_fp_montNF8L
+ .align 2
+ .type mcl_fp_montNF8L,%function
+mcl_fp_montNF8L: @ @mcl_fp_montNF8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #716
+ sub sp, sp, #716
+ mov r7, r2
+ ldr r5, [r3, #-4]
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #672
+ str r3, [sp, #60] @ 4-byte Spill
+ str r1, [sp, #68] @ 4-byte Spill
+ mov r4, r3
+ mov r10, r1
+ ldr r2, [r7]
+ str r7, [sp, #56] @ 4-byte Spill
+ str r5, [sp, #64] @ 4-byte Spill
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #676]
+ ldr r11, [sp, #672]
+ mov r1, r4
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #680]
+ mul r2, r11, r5
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #684]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #688]
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #632
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #664]
+ ldr r2, [r7, #4]
+ ldr r4, [sp, #648]
+ ldr r6, [sp, #632]
+ ldr r8, [sp, #636]
+ ldr r5, [sp, #640]
+ ldr r9, [sp, #644]
+ mov r1, r10
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #660]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #656]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #652]
+ str r0, [sp, #4] @ 4-byte Spill
+ add r0, sp, #592
+ bl .LmulPv256x32(PLT)
+ adds r0, r6, r11
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add r6, sp, #596
+ ldr r12, [sp, #616]
+ ldr r3, [sp, #612]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #620]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r9, r9, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r4, [sp, #592]
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r10, r1, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r7, r1, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adc r0, r1, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #624]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r6, {r0, r1, r2, r6}
+ ldr lr, [sp, #40] @ 4-byte Reload
+ ldr r5, [sp, #36] @ 4-byte Reload
+ adds r4, lr, r4
+ adcs r0, r5, r0
+ ldr r5, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r9, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r11, r2
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ mul r2, r4, r5
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r10, r3
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r7, r12
+ ldr r7, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ mov r1, r7
+ adcs r0, r0, r8
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #552
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #584]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r9, [sp, #568]
+ ldr r10, [sp, #552]
+ ldr r11, [sp, #556]
+ ldr r8, [sp, #560]
+ ldr r6, [sp, #564]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #576]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #572]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, sp, #512
+ bl .LmulPv256x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #516
+ ldr r4, [sp, #536]
+ ldr r3, [sp, #512]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #40] @ 4-byte Reload
+ ldr r8, [sp, #540]
+ adcs r11, r0, r6
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #544]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r12, lr}
+ ldr r6, [sp, #48] @ 4-byte Reload
+ adds r9, r6, r3
+ adcs r0, r10, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r11, r1
+ mov r1, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r9, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #472
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #504]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #492]
+ ldr r7, [sp, #488]
+ ldr r10, [sp, #472]
+ ldr r11, [sp, #476]
+ ldr r8, [sp, #480]
+ ldr r6, [sp, #484]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #500]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #496]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #432
+ bl .LmulPv256x32(PLT)
+ adds r0, r9, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r3, [sp, #432]
+ add lr, sp, #436
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r5, r0, r11
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #40] @ 4-byte Reload
+ ldr r8, [sp, #460]
+ adcs r11, r0, r6
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r7, r0, r7
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #456]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r9, r5, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #464]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r12, lr}
+ adcs r0, r10, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r11, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r7, r2
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r6, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ mul r2, r9, r0
+ add r0, sp, #392
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #424]
+ ldr r5, [sp, #56] @ 4-byte Reload
+ ldr r7, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #408]
+ ldr r10, [sp, #392]
+ ldr r11, [sp, #396]
+ ldr r8, [sp, #400]
+ ldr r6, [sp, #404]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #420]
+ ldr r2, [r5, #16]
+ mov r1, r7
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #4] @ 4-byte Spill
+ add r0, sp, #352
+ bl .LmulPv256x32(PLT)
+ adds r0, r9, r10
+ ldr r1, [sp, #4] @ 4-byte Reload
+ ldr r3, [sp, #352]
+ add lr, sp, #356
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r9, r0, r11
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #40] @ 4-byte Reload
+ ldr r8, [sp, #380]
+ adcs r11, r0, r6
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #376]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r9, r9, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #384]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r12, lr}
+ adcs r0, r10, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r11, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r6, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ mul r2, r9, r0
+ add r0, sp, #312
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #344]
+ ldr r2, [r5, #20]
+ ldr r4, [sp, #328]
+ ldr r10, [sp, #312]
+ ldr r11, [sp, #316]
+ ldr r8, [sp, #320]
+ ldr r6, [sp, #324]
+ mov r1, r7
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #340]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #336]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #332]
+ str r0, [sp, #4] @ 4-byte Spill
+ add r0, sp, #272
+ bl .LmulPv256x32(PLT)
+ adds r0, r9, r10
+ ldr r1, [sp, #4] @ 4-byte Reload
+ ldr r3, [sp, #272]
+ add lr, sp, #276
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r5, r0, r11
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #40] @ 4-byte Reload
+ ldr r8, [sp, #300]
+ adcs r11, r0, r6
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r7, r0, r4
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r4, [sp, #296]
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r9, r5, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #304]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r12, lr}
+ ldr r5, [sp, #60] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r11, r1
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r7, r2
+ ldr r7, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ mul r2, r9, r7
+ adcs r0, r0, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r6, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #232
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #264]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #248]
+ ldr r10, [sp, #232]
+ ldr r11, [sp, #236]
+ ldr r8, [sp, #240]
+ ldr r6, [sp, #244]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #256]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #252]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #192
+ bl .LmulPv256x32(PLT)
+ adds r0, r9, r10
+ ldr r1, [sp, #4] @ 4-byte Reload
+ ldr r3, [sp, #192]
+ add lr, sp, #196
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r9, r0, r11
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r10, r0, r8
+ ldr r0, [sp, #40] @ 4-byte Reload
+ ldr r8, [sp, #220]
+ adcs r11, r0, r6
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #216]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r9, r9, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #224]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r12, lr}
+ adcs r0, r10, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r11, r1
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r9, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r6, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #152
+ bl .LmulPv256x32(PLT)
+ ldr r0, [sp, #184]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r5, [sp, #176]
+ ldr r4, [sp, #172]
+ ldr r7, [sp, #168]
+ ldr r10, [sp, #152]
+ ldr r11, [sp, #156]
+ ldr r8, [sp, #160]
+ ldr r6, [sp, #164]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #180]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #112
+ bl .LmulPv256x32(PLT)
+ adds r0, r9, r10
+ ldr r1, [sp, #44] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #128
+ ldr r12, [sp, #116]
+ ldr r3, [sp, #120]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r11
+ adcs r1, r1, r8
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r11, r1, r7
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r10, r1, r4
+ ldr r1, [sp, #28] @ 4-byte Reload
+ ldr r4, [sp, #124]
+ adcs r1, r1, r5
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adc r1, r1, r2
+ ldr r2, [sp, #112]
+ str r1, [sp, #40] @ 4-byte Spill
+ adds r5, r0, r2
+ ldr r0, [sp, #64] @ 4-byte Reload
+ mul r9, r5, r0
+ ldm lr, {r0, r1, r2, r6, lr}
+ ldr r8, [sp, #68] @ 4-byte Reload
+ adcs r7, r8, r12
+ ldr r8, [sp, #60] @ 4-byte Reload
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adcs r3, r7, r3
+ adcs r11, r11, r4
+ str r3, [sp, #56] @ 4-byte Spill
+ adcs r4, r10, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ mov r2, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r10, r0, r6
+ add r0, sp, #72
+ adc r7, lr, #0
+ bl .LmulPv256x32(PLT)
+ add r3, sp, #72
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r5, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r5, r0, r1
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r11, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #88]
+ adcs r3, r4, r0
+ ldr r0, [sp, #92]
+ str r3, [sp, #40] @ 4-byte Spill
+ adcs r6, r1, r0
+ ldr r0, [sp, #96]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r6, [sp, #64] @ 4-byte Spill
+ adcs r12, r1, r0
+ ldr r0, [sp, #100]
+ ldr r1, [sp, #104]
+ str r12, [sp, #68] @ 4-byte Spill
+ adcs r11, r10, r0
+ adc r4, r7, r1
+ ldm r8, {r1, r2, r9, r10}
+ ldr r0, [r8, #20]
+ ldr r7, [r8, #16]
+ ldr lr, [r8, #28]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r8, #24]
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, r5
+ subs r5, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r8, r1, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ sbcs r9, r2, r9
+ sbcs r10, r3, r10
+ ldr r3, [sp, #36] @ 4-byte Reload
+ sbcs r7, r6, r7
+ sbcs r6, r12, r3
+ ldr r3, [sp, #44] @ 4-byte Reload
+ sbcs r12, r11, r3
+ sbc lr, r4, lr
+ cmp lr, #0
+ movlt r5, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ movlt r8, r1
+ movlt r9, r2
+ cmp lr, #0
+ movlt r10, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ movlt r7, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ movlt r6, r0
+ cmp lr, #0
+ movlt lr, r4
+ ldr r4, [sp, #52] @ 4-byte Reload
+ movlt r12, r11
+ add r0, r4, #20
+ stm r4, {r5, r8, r9, r10}
+ str r7, [r4, #16]
+ stm r0, {r6, r12, lr}
+ add sp, sp, #716
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end115:
+ .size mcl_fp_montNF8L, .Lfunc_end115-mcl_fp_montNF8L
+ .cantunwind
+ .fnend
+
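+@ mcl_fp_montRed8L: Montgomery reduction of the double-width (16-limb) value
+@ at [r1] to an 8-limb result at [r0], with the modulus at [r2] and
+@ -p^-1 mod 2^32 at [r2, #-4]; each .LmulPv256x32 pass folds in one limb of
+@ the reduction.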
+ .globl mcl_fp_montRed8L
+ .align 2
+ .type mcl_fp_montRed8L,%function
+mcl_fp_montRed8L: @ @mcl_fp_montRed8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #420
+ sub sp, sp, #420
+ mov r5, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r4, [r1]
+ ldr r9, [r1, #40]
+ ldr r10, [r1, #44]
+ ldr r0, [r5]
+ ldr r11, [r5, #-4]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r5, #4]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r5, #8]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #16]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r5, #12]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r1, #20]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r5, #16]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #24]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r5, #20]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #28]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r5, #24]
+ str r2, [sp, #44] @ 4-byte Spill
+ mul r2, r4, r11
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r5, #28]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ mov r1, r5
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #376
+ bl .LmulPv256x32(PLT)
+ add lr, sp, #396
+ ldr r8, [sp, #408]
+ add r6, sp, #384
+ ldm lr, {r3, r12, lr}
+ ldr r7, [sp, #376]
+ ldr r1, [sp, #380]
+ ldm r6, {r0, r2, r6}
+ adds r4, r4, r7
+ ldr r4, [sp, #56] @ 4-byte Reload
+ adcs r4, r4, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r4, r11
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ adcs r9, r9, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ adcs r0, r10, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #4] @ 4-byte Spill
+ add r0, sp, #336
+ bl .LmulPv256x32(PLT)
+ add lr, sp, #356
+ ldr r8, [sp, #368]
+ add r6, sp, #340
+ ldm lr, {r3, r12, lr}
+ ldr r7, [sp, #336]
+ ldm r6, {r0, r1, r2, r6}
+ adds r4, r4, r7
+ ldr r4, [sp, #56] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r10, r0, r2
+ ldr r0, [sp, #36] @ 4-byte Reload
+ mul r2, r4, r11
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r9, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #296
+ bl .LmulPv256x32(PLT)
+ add r8, sp, #320
+ add lr, sp, #300
+ ldm r8, {r6, r7, r8}
+ ldr r1, [sp, #296]
+ ldm lr, {r0, r2, r3, r12, lr}
+ adds r1, r4, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r4, r1, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r1, r5
+ adcs r10, r10, r2
+ mul r2, r4, r11
+ adcs r9, r0, r3
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #256
+ bl .LmulPv256x32(PLT)
+ add lr, sp, #276
+ ldr r8, [sp, #288]
+ add r6, sp, #260
+ ldm lr, {r3, r12, lr}
+ ldr r7, [sp, #256]
+ ldm r6, {r0, r1, r2, r6}
+ adds r4, r4, r7
+ adcs r4, r10, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r9, r9, r1
+ mov r1, r5
+ adcs r10, r0, r2
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mul r2, r4, r11
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #216
+ bl .LmulPv256x32(PLT)
+ add r8, sp, #240
+ add lr, sp, #220
+ ldm r8, {r6, r7, r8}
+ ldr r1, [sp, #216]
+ ldm lr, {r0, r2, r3, r12, lr}
+ adds r1, r4, r1
+ adcs r4, r9, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mov r1, r5
+ adcs r10, r10, r2
+ mul r2, r4, r11
+ adcs r9, r0, r3
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #176
+ bl .LmulPv256x32(PLT)
+ add lr, sp, #196
+ ldr r8, [sp, #208]
+ add r6, sp, #180
+ ldm lr, {r3, r12, lr}
+ ldr r7, [sp, #176]
+ ldm r6, {r0, r1, r2, r6}
+ adds r4, r4, r7
+ adcs r4, r10, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r7, r9, r1
+ mov r1, r5
+ adcs r9, r0, r2
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mul r2, r4, r11
+ adcs r6, r0, r6
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r10, r0, r3
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #136
+ bl .LmulPv256x32(PLT)
+ add r12, sp, #136
+ ldm r12, {r0, r1, r3, r12}
+ adds r0, r4, r0
+ adcs r4, r7, r1
+ ldr r7, [sp, #152]
+ ldr r0, [sp, #168]
+ adcs r1, r9, r3
+ ldr r3, [sp, #160]
+ mul r2, r4, r11
+ adcs r9, r6, r12
+ ldr r6, [sp, #156]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #164]
+ adcs r10, r10, r7
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adcs r6, r7, r6
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r8, r7, r3
+ ldr r3, [sp, #48] @ 4-byte Reload
+ adcs r11, r3, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #96
+ bl .LmulPv256x32(PLT)
+ add r3, sp, #96
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r4, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r5, r0, r1
+ ldr r0, [sp, #112]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r9, r9, r2
+ adcs r10, r10, r3
+ adcs r3, r6, r0
+ ldr r0, [sp, #116]
+ str r3, [sp, #36] @ 4-byte Spill
+ adcs lr, r8, r0
+ ldr r0, [sp, #120]
+ str lr, [sp, #40] @ 4-byte Spill
+ adcs r7, r11, r0
+ ldr r0, [sp, #124]
+ str r7, [sp, #44] @ 4-byte Spill
+ adcs r4, r1, r0
+ ldr r0, [sp, #128]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r4, [sp, #48] @ 4-byte Spill
+ adcs r12, r1, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r8, r0, #0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ subs r1, r5, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ sbcs r2, r9, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ sbcs r6, r10, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r3, r0
+ ldr r3, [sp, #68] @ 4-byte Reload
+ sbcs r11, lr, r3
+ ldr r3, [sp, #72] @ 4-byte Reload
+ sbcs r3, r7, r3
+ ldr r7, [sp, #76] @ 4-byte Reload
+ sbcs lr, r4, r7
+ ldr r7, [sp, #60] @ 4-byte Reload
+ sbcs r4, r12, r7
+ sbc r7, r8, #0
+ ands r7, r7, #1
+ movne r1, r5
+ ldr r5, [sp, #92] @ 4-byte Reload
+ movne r2, r9
+ movne r6, r10
+ cmp r7, #0
+ str r1, [r5]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r2, [r5, #4]
+ str r6, [r5, #8]
+ movne r0, r1
+ str r0, [r5, #12]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ movne r11, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ str r11, [r5, #16]
+ movne r3, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ cmp r7, #0
+ movne r4, r12
+ str r3, [r5, #20]
+ movne lr, r0
+ str lr, [r5, #24]
+ str r4, [r5, #28]
+ add sp, sp, #420
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end116:
+ .size mcl_fp_montRed8L, .Lfunc_end116-mcl_fp_montRed8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre8L
+ .align 2
+ .type mcl_fp_addPre8L,%function
+mcl_fp_addPre8L: @ @mcl_fp_addPre8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldr r3, [r1, #4]
+ ldr r9, [r1]
+ ldr r10, [r1, #12]
+ ldr r11, [r1, #16]
+ ldr r8, [r1, #28]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [r1, #8]
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [r1, #20]
+ str r3, [sp] @ 4-byte Spill
+ ldr r3, [r1, #24]
+ str r3, [sp, #4] @ 4-byte Spill
+ ldm r2, {r1, r3, r4, r5, r12, lr}
+ ldr r7, [sp, #12] @ 4-byte Reload
+ ldr r6, [r2, #24]
+ ldr r2, [r2, #28]
+ adds r1, r1, r9
+ adcs r3, r3, r7
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r4, r4, r7
+ ldr r7, [sp] @ 4-byte Reload
+ adcs r5, r5, r10
+ adcs r12, r12, r11
+ adcs lr, lr, r7
+ ldr r7, [sp, #4] @ 4-byte Reload
+ stm r0, {r1, r3, r4, r5, r12, lr}
+ adcs r6, r6, r7
+ adcs r2, r2, r8
+ str r6, [r0, #24]
+ str r2, [r0, #28]
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end117:
+ .size mcl_fp_addPre8L, .Lfunc_end117-mcl_fp_addPre8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subPre8L
+ .align 2
+ .type mcl_fp_subPre8L,%function
+mcl_fp_subPre8L: @ @mcl_fp_subPre8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldr r3, [r2, #4]
+ ldr r9, [r2]
+ ldr r10, [r2, #12]
+ ldr r11, [r2, #16]
+ ldr r8, [r2, #28]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [r2, #8]
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [r2, #20]
+ str r3, [sp] @ 4-byte Spill
+ ldr r3, [r2, #24]
+ str r3, [sp, #4] @ 4-byte Spill
+ ldm r1, {r2, r3, r4, r5, r12, lr}
+ ldr r7, [sp, #12] @ 4-byte Reload
+ ldr r6, [r1, #24]
+ ldr r1, [r1, #28]
+ subs r2, r2, r9
+ sbcs r3, r3, r7
+ ldr r7, [sp, #8] @ 4-byte Reload
+ sbcs r4, r4, r7
+ ldr r7, [sp] @ 4-byte Reload
+ sbcs r5, r5, r10
+ sbcs r12, r12, r11
+ sbcs lr, lr, r7
+ ldr r7, [sp, #4] @ 4-byte Reload
+ stm r0, {r2, r3, r4, r5, r12, lr}
+ sbcs r6, r6, r7
+ sbcs r1, r1, r8
+ str r6, [r0, #24]
+ str r1, [r0, #28]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end118:
+ .size mcl_fp_subPre8L, .Lfunc_end118-mcl_fp_subPre8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_8L
+ .align 2
+ .type mcl_fp_shr1_8L,%function
+mcl_fp_shr1_8L: @ @mcl_fp_shr1_8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ ldr r3, [r1, #4]
+ ldr r12, [r1]
+ ldr lr, [r1, #12]
+ add r6, r1, #16
+ ldr r2, [r1, #8]
+ ldm r6, {r4, r5, r6}
+ ldr r1, [r1, #28]
+ lsrs r7, r3, #1
+ lsr r3, r3, #1
+ rrx r12, r12
+ lsrs r7, lr, #1
+ orr r8, r3, r2, lsl #31
+ lsr r7, lr, #1
+ rrx r2, r2
+ lsrs r3, r5, #1
+ lsr r5, r5, #1
+ str r12, [r0]
+ str r8, [r0, #4]
+ orr r7, r7, r4, lsl #31
+ rrx r3, r4
+ lsrs r4, r1, #1
+ str r2, [r0, #8]
+ orr r5, r5, r6, lsl #31
+ lsr r1, r1, #1
+ add r2, r0, #16
+ rrx r6, r6
+ str r7, [r0, #12]
+ stm r2, {r3, r5, r6}
+ str r1, [r0, #28]
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end119:
+ .size mcl_fp_shr1_8L, .Lfunc_end119-mcl_fp_shr1_8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add8L
+ .align 2
+ .type mcl_fp_add8L,%function
+mcl_fp_add8L: @ @mcl_fp_add8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #28
+ sub sp, sp, #28
+ ldr r7, [r1, #12]
+ ldr lr, [r1]
+ ldr r11, [r1, #4]
+ ldr r10, [r1, #8]
+ add r8, r2, #20
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r1, #20]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r1, #24]
+ ldr r1, [r1, #28]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r1, [sp, #4] @ 4-byte Spill
+ ldm r2, {r1, r4, r5, r12}
+ ldr r9, [r2, #16]
+ ldm r8, {r6, r7, r8}
+ ldr r2, [sp] @ 4-byte Reload
+ adds lr, r1, lr
+ adcs r1, r4, r11
+ str lr, [r0]
+ adcs r4, r5, r10
+ ldr r5, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ str r4, [sp, #20] @ 4-byte Spill
+ adcs r10, r12, r5
+ adcs r5, r9, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r5, [sp, #16] @ 4-byte Spill
+ adcs r12, r6, r2
+ ldr r6, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ stmib r0, {r1, r4, r10}
+ mov r1, #0
+ str r5, [r0, #16]
+ str r12, [r0, #20]
+ adcs r7, r7, r6
+ mov r6, r12
+ adcs r11, r8, r2
+ str r7, [r0, #24]
+ mov r8, lr
+ adc r1, r1, #0
+ str r11, [r0, #28]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r3, {r1, r2, r9, r12, lr}
+ ldr r4, [r3, #20]
+ ldr r5, [r3, #24]
+ ldr r3, [r3, #28]
+ subs r1, r8, r1
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ sbcs r8, r1, r2
+ ldr r1, [sp, #20] @ 4-byte Reload
+ sbcs r2, r1, r9
+ ldr r1, [sp, #16] @ 4-byte Reload
+ sbcs r12, r10, r12
+ sbcs lr, r1, lr
+ ldr r1, [sp, #12] @ 4-byte Reload
+ sbcs r4, r6, r4
+ sbcs r5, r7, r5
+ sbcs r6, r11, r3
+ sbc r3, r1, #0
+ tst r3, #1
+ bne .LBB120_2
+@ BB#1: @ %nocarry
+ ldr r1, [sp, #8] @ 4-byte Reload
+ stm r0, {r1, r8}
+ add r1, r0, #8
+ add r0, r0, #20
+ stm r1, {r2, r12, lr}
+ stm r0, {r4, r5, r6}
+.LBB120_2: @ %carry
+ add sp, sp, #28
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end120:
+ .size mcl_fp_add8L, .Lfunc_end120-mcl_fp_add8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF8L
+ .align 2
+ .type mcl_fp_addNF8L,%function
+mcl_fp_addNF8L: @ @mcl_fp_addNF8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #32
+ sub sp, sp, #32
+ ldm r1, {r6, r8}
+ ldr r7, [r1, #8]
+ ldr r9, [r1, #28]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r1, #12]
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [r1, #20]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r1, #24]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldm r2, {r1, r4, r5, r12, lr}
+ ldr r10, [r2, #20]
+ ldr r11, [r2, #24]
+ ldr r2, [r2, #28]
+ adds r7, r1, r6
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r6, r4, r8
+ ldr r4, [sp, #20] @ 4-byte Reload
+ str r7, [sp, #4] @ 4-byte Spill
+ str r6, [sp, #8] @ 4-byte Spill
+ adcs r8, r5, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adcs r1, r12, r1
+ adcs r12, lr, r5
+ ldr r5, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ adcs lr, r10, r5
+ adcs r5, r11, r4
+ ldr r4, [r3, #4]
+ ldr r11, [r3, #16]
+ str lr, [sp, #24] @ 4-byte Spill
+ adc r10, r2, r9
+ ldr r2, [r3]
+ ldr r9, [r3, #12]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r3, #8]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r3, #20]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r3, #24]
+ ldr r3, [r3, #28]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [sp, #16] @ 4-byte Reload
+ subs r2, r7, r2
+ sbcs r7, r6, r4
+ ldr r4, [sp, #20] @ 4-byte Reload
+ sbcs r6, r8, r4
+ sbcs r9, r1, r9
+ ldr r1, [sp] @ 4-byte Reload
+ sbcs r4, r12, r11
+ mov r11, r12
+ sbcs r12, lr, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ sbcs lr, r5, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ sbc r3, r10, r3
+ cmp r3, #0
+ movlt r6, r8
+ movlt r2, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ movlt r7, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ cmp r3, #0
+ movlt r4, r11
+ movlt r9, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ stm r0, {r2, r7}
+ str r6, [r0, #8]
+ str r9, [r0, #12]
+ movlt r12, r1
+ cmp r3, #0
+ add r1, r0, #16
+ movlt lr, r5
+ movlt r3, r10
+ stm r1, {r4, r12, lr}
+ str r3, [r0, #28]
+ add sp, sp, #32
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end121:
+ .size mcl_fp_addNF8L, .Lfunc_end121-mcl_fp_addNF8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sub8L
+ .align 2
+ .type mcl_fp_sub8L,%function
+mcl_fp_sub8L: @ @mcl_fp_sub8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldm r2, {r12, lr}
+ ldr r4, [r2, #8]
+ ldr r9, [r2, #20]
+ ldr r10, [r2, #24]
+ add r8, r1, #12
+ str r4, [sp, #12] @ 4-byte Spill
+ ldr r4, [r2, #12]
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [r2, #16]
+ ldr r2, [r2, #28]
+ str r4, [sp] @ 4-byte Spill
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm r1, {r4, r5, r11}
+ ldm r8, {r2, r7, r8}
+ ldr r6, [r1, #24]
+ ldr r1, [r1, #28]
+ subs r12, r4, r12
+ ldr r4, [sp, #12] @ 4-byte Reload
+ sbcs lr, r5, lr
+ sbcs r11, r11, r4
+ ldr r4, [sp, #8] @ 4-byte Reload
+ sbcs r2, r2, r4
+ ldr r4, [sp] @ 4-byte Reload
+ sbcs r4, r7, r4
+ ldr r7, [sp, #4] @ 4-byte Reload
+ stm r0, {r12, lr}
+ str r11, [r0, #8]
+ sbcs r5, r8, r9
+ sbcs r6, r6, r10
+ sbcs r7, r1, r7
+ add r1, r0, #12
+ stm r1, {r2, r4, r5, r6, r7}
+ mov r1, #0
+ sbc r1, r1, #0
+ tst r1, #1
+ beq .LBB122_2
+@ BB#1: @ %carry
+ ldr r1, [r3]
+ add r10, r3, #12
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [r3, #4]
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [r3, #8]
+ str r1, [sp] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r1, [r3, #24]
+ ldr r3, [r3, #28]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adds r1, r1, r12
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r12, r1, lr
+ ldr r1, [sp] @ 4-byte Reload
+ adcs lr, r1, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r2, r8, r2
+ adcs r4, r9, r4
+ adcs r5, r10, r5
+ adcs r6, r1, r6
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adc r3, r3, r7
+ stm r0, {r1, r12, lr}
+ add r1, r0, #12
+ stm r1, {r2, r4, r5, r6}
+ str r3, [r0, #28]
+.LBB122_2: @ %nocarry
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end122:
+ .size mcl_fp_sub8L, .Lfunc_end122-mcl_fp_sub8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF8L
+ .align 2
+ .type mcl_fp_subNF8L,%function
+mcl_fp_subNF8L: @ @mcl_fp_subNF8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #36
+ sub sp, sp, #36
+ ldm r2, {r6, r8}
+ ldr r7, [r2, #8]
+ ldr r11, [r2, #12]
+ ldr r9, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #16]
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #32] @ 4-byte Spill
+ ldm r1, {r2, r4, r5, r12, lr}
+ ldr r10, [r1, #20]
+ ldr r7, [r1, #24]
+ ldr r1, [r1, #28]
+ subs r6, r2, r6
+ ldr r2, [sp, #20] @ 4-byte Reload
+ sbcs r8, r4, r8
+ ldr r4, [sp, #24] @ 4-byte Reload
+ str r6, [sp, #16] @ 4-byte Spill
+ sbcs r5, r5, r2
+ sbcs r2, r12, r11
+ ldr r11, [r3, #12]
+ sbcs r12, lr, r4
+ ldr r4, [sp, #28] @ 4-byte Reload
+ str r2, [sp, #20] @ 4-byte Spill
+ str r12, [sp, #24] @ 4-byte Spill
+ sbcs lr, r10, r4
+ ldr r4, [sp, #32] @ 4-byte Reload
+ ldr r10, [r3, #16]
+ str lr, [sp, #28] @ 4-byte Spill
+ sbcs r4, r7, r4
+ ldr r7, [r3]
+ sbc r1, r1, r9
+ ldr r9, [r3, #8]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r3, #4]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r3, #20]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r3, #24]
+ ldr r3, [r3, #28]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [sp, #4] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ adds r7, r6, r3
+ ldr r3, [sp] @ 4-byte Reload
+ adcs r6, r8, r3
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adcs r9, r5, r9
+ adcs r11, r2, r11
+ adcs r2, r12, r10
+ ldr r10, [sp, #16] @ 4-byte Reload
+ adcs r12, lr, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs lr, r4, r3
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adc r3, r1, r3
+ cmp r1, #0
+ movge r9, r5
+ ldr r5, [sp, #20] @ 4-byte Reload
+ movge r7, r10
+ movge r6, r8
+ cmp r1, #0
+ str r7, [r0]
+ movge r11, r5
+ ldr r5, [sp, #24] @ 4-byte Reload
+ movge r2, r5
+ ldr r5, [sp, #28] @ 4-byte Reload
+ stmib r0, {r6, r9, r11}
+ movge r12, r5
+ cmp r1, #0
+ movge r3, r1
+ movge lr, r4
+ add r1, r0, #16
+ stm r1, {r2, r12, lr}
+ str r3, [r0, #28]
+ add sp, sp, #36
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end123:
+ .size mcl_fp_subNF8L, .Lfunc_end123-mcl_fp_subNF8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add8L
+ .align 2
+ .type mcl_fpDbl_add8L,%function
+mcl_fpDbl_add8L: @ @mcl_fpDbl_add8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #68
+ sub sp, sp, #68
+ ldm r1, {r7, r9}
+ ldr r6, [r1, #8]
+ ldr r8, [r1, #12]
+ ldm r2, {r4, r12, lr}
+ ldr r5, [r2, #12]
+ adds r4, r4, r7
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [r2, #32]
+ adcs r7, r12, r9
+ adcs r6, lr, r6
+ add lr, r1, #16
+ adcs r9, r5, r8
+ ldr r5, [r2, #28]
+ add r8, r2, #16
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r2, #36]
+ str r5, [sp, #28] @ 4-byte Spill
+ str r4, [sp, #40] @ 4-byte Spill
+ ldr r4, [r2, #40]
+ str r4, [sp, #44] @ 4-byte Spill
+ ldr r4, [r2, #44]
+ str r4, [sp, #48] @ 4-byte Spill
+ ldr r4, [r2, #48]
+ str r4, [sp, #52] @ 4-byte Spill
+ ldr r4, [r2, #52]
+ str r4, [sp, #56] @ 4-byte Spill
+ ldr r4, [r2, #56]
+ str r4, [sp, #60] @ 4-byte Spill
+ ldr r4, [r2, #60]
+ str r4, [sp, #64] @ 4-byte Spill
+ ldm r8, {r4, r5, r8}
+ ldr r2, [r1, #36]
+ ldr r10, [r1, #32]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #32] @ 4-byte Reload
+ adcs r1, r4, r1
+ str r11, [r0]
+ str r7, [r0, #4]
+ str r6, [r0, #8]
+ str r9, [r0, #12]
+ ldr r6, [sp, #8] @ 4-byte Reload
+ ldr r4, [sp, #16] @ 4-byte Reload
+ adcs r2, r5, r2
+ str r1, [r0, #16]
+ str r2, [r0, #20]
+ adcs r1, r8, r12
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, lr
+ adcs r1, r1, r10
+ str r2, [r0, #28]
+ ldr r2, [sp] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r7, r1, r2
+ ldr r1, [sp, #44] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ adcs r2, r1, r2
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r2, [sp, #44] @ 4-byte Spill
+ adcs r12, r1, r6
+ ldr r1, [sp, #52] @ 4-byte Reload
+ ldr r6, [sp, #12] @ 4-byte Reload
+ str r12, [sp, #48] @ 4-byte Spill
+ adcs lr, r1, r6
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str lr, [sp, #52] @ 4-byte Spill
+ adcs r5, r1, r4
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r4, [sp, #20] @ 4-byte Reload
+ str r5, [sp, #56] @ 4-byte Spill
+ adcs r8, r1, r4
+ ldr r1, [sp, #64] @ 4-byte Reload
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adcs r10, r1, r4
+ mov r1, #0
+ adc r1, r1, #0
+ str r10, [sp, #60] @ 4-byte Spill
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [r3]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldmib r3, {r4, r11}
+ ldr r6, [r3, #12]
+ ldr r1, [r3, #24]
+ ldr r9, [r3, #16]
+ str r6, [sp, #40] @ 4-byte Spill
+ ldr r6, [r3, #20]
+ ldr r3, [r3, #28]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [sp, #36] @ 4-byte Reload
+ subs r1, r3, r1
+ sbcs r4, r7, r4
+ sbcs r11, r2, r11
+ ldr r2, [sp, #40] @ 4-byte Reload
+ sbcs r2, r12, r2
+ sbcs r12, lr, r9
+ mov r9, r8
+ sbcs lr, r5, r6
+ ldr r5, [sp, #28] @ 4-byte Reload
+ sbcs r6, r8, r5
+ ldr r5, [sp, #32] @ 4-byte Reload
+ sbcs r8, r10, r5
+ ldr r5, [sp, #64] @ 4-byte Reload
+ sbc r10, r5, #0
+ ands r10, r10, #1
+ movne r1, r3
+ movne r4, r7
+ str r1, [r0, #32]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r4, [r0, #36]
+ movne r11, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ cmp r10, #0
+ str r11, [r0, #40]
+ movne r2, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r2, [r0, #44]
+ movne r12, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r12, [r0, #48]
+ movne lr, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ cmp r10, #0
+ movne r6, r9
+ str lr, [r0, #52]
+ str r6, [r0, #56]
+ movne r8, r1
+ str r8, [r0, #60]
+ add sp, sp, #68
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end124:
+ .size mcl_fpDbl_add8L, .Lfunc_end124-mcl_fpDbl_add8L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub8L
+ .align 2
+ .type mcl_fpDbl_sub8L,%function
+mcl_fpDbl_sub8L: @ @mcl_fpDbl_sub8L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #64
+ sub sp, sp, #64
+ ldr r7, [r2, #32]
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldm r2, {r4, r5, r8}
+ ldr r6, [r2, #20]
+ ldr r7, [r2, #12]
+ ldr r9, [r2, #16]
+ ldr r11, [r2, #24]
+ ldr r10, [r2, #28]
+ str r6, [sp, #28] @ 4-byte Spill
+ ldm r1, {r2, r12, lr}
+ ldr r6, [r1, #12]
+ subs r4, r2, r4
+ ldr r2, [r1, #32]
+ sbcs r5, r12, r5
+ ldr r12, [r1, #36]
+ sbcs lr, lr, r8
+ add r8, r1, #16
+ sbcs r6, r6, r7
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm r8, {r1, r2, r7, r8}
+ stm r0, {r4, r5, lr}
+ str r6, [r0, #12]
+ mov r4, #0
+ ldr r6, [sp, #28] @ 4-byte Reload
+ ldr r5, [sp, #20] @ 4-byte Reload
+ sbcs r1, r1, r9
+ sbcs r2, r2, r6
+ str r1, [r0, #16]
+ sbcs r1, r7, r11
+ str r2, [r0, #20]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ ldr r7, [sp, #8] @ 4-byte Reload
+ str r1, [r0, #24]
+ sbcs r1, r8, r10
+ str r1, [r0, #28]
+ ldr r1, [sp] @ 4-byte Reload
+ sbcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #4] @ 4-byte Reload
+ sbcs r6, r12, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r6, [sp, #36] @ 4-byte Spill
+ sbcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ sbcs r9, r7, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ ldr r7, [sp, #12] @ 4-byte Reload
+ sbcs r12, r7, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ ldr r7, [sp, #16] @ 4-byte Reload
+ str r12, [sp, #48] @ 4-byte Spill
+ sbcs lr, r7, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str lr, [sp, #52] @ 4-byte Spill
+ sbcs r8, r5, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ ldr r5, [sp, #24] @ 4-byte Reload
+ sbcs r11, r5, r2
+ sbc r2, r4, #0
+ str r2, [sp, #60] @ 4-byte Spill
+ ldm r3, {r4, r5}
+ ldr r2, [r3, #8]
+ ldr r10, [r3, #20]
+ ldr r7, [r3, #24]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r3, #12]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r3, #16]
+ ldr r3, [r3, #28]
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adds r4, r3, r4
+ adcs r5, r6, r5
+ ldr r6, [sp, #44] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r9, r1
+ adcs r2, r12, r2
+ adcs r12, lr, r10
+ adcs lr, r8, r7
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adc r10, r11, r7
+ ldr r7, [sp, #60] @ 4-byte Reload
+ ands r7, r7, #1
+ moveq r4, r3
+ ldr r3, [sp, #36] @ 4-byte Reload
+ str r4, [r0, #32]
+ moveq r5, r3
+ ldr r3, [sp, #40] @ 4-byte Reload
+ str r5, [r0, #36]
+ moveq r6, r3
+ cmp r7, #0
+ moveq r1, r9
+ str r6, [r0, #40]
+ str r1, [r0, #44]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r2, [r0, #48]
+ moveq r12, r1
+ cmp r7, #0
+ moveq lr, r8
+ moveq r10, r11
+ str r12, [r0, #52]
+ str lr, [r0, #56]
+ str r10, [r0, #60]
+ add sp, sp, #64
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end125:
+ .size mcl_fpDbl_sub8L, .Lfunc_end125-mcl_fpDbl_sub8L
+ .cantunwind
+ .fnend
+
+ .align 2
+ .type .LmulPv288x32,%function
+.LmulPv288x32: @ @mulPv288x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r3, [r1, #28]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #28]
+ ldr r1, [r1, #32]
+ umull r3, r7, r1, r2
+ adcs r1, r5, r3
+ adc r2, r7, #0
+ str r1, [r0, #32]
+ str r2, [r0, #36]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end126:
+ .size .LmulPv288x32, .Lfunc_end126-.LmulPv288x32
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre9L
+ .align 2
+ .type mcl_fp_mulUnitPre9L,%function
+mcl_fp_mulUnitPre9L: @ @mcl_fp_mulUnitPre9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ .pad #40
+ sub sp, sp, #40
+ mov r4, r0
+ mov r0, sp
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #20
+ ldr r12, [sp, #36]
+ ldm lr, {r0, r3, r8, lr}
+ ldr r1, [sp, #16]
+ ldm sp, {r5, r6, r7}
+ ldr r2, [sp, #12]
+ stm r4, {r5, r6, r7}
+ str r2, [r4, #12]
+ str r1, [r4, #16]
+ add r1, r4, #20
+ stm r1, {r0, r3, r8, lr}
+ str r12, [r4, #36]
+ add sp, sp, #40
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end127:
+ .size mcl_fp_mulUnitPre9L, .Lfunc_end127-mcl_fp_mulUnitPre9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre9L
+ .align 2
+ .type mcl_fpDbl_mulPre9L,%function
+mcl_fpDbl_mulPre9L: @ @mcl_fpDbl_mulPre9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #412
+ sub sp, sp, #412
+ mov r10, r2
+ mov r8, r0
+ add r0, sp, #368
+ str r1, [sp, #44] @ 4-byte Spill
+ mov r4, r1
+ ldr r2, [r10]
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #404]
+ ldr r1, [sp, #376]
+ ldr r2, [r10, #4]
+ ldr r9, [sp, #372]
+ ldr r11, [sp, #380]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #400]
+ str r1, [sp, #16] @ 4-byte Spill
+ mov r1, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #396]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #392]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #388]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #384]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [r8]
+ add r0, sp, #328
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #352
+ ldr r4, [sp, #364]
+ add r7, sp, #332
+ ldm lr, {r3, r12, lr}
+ ldr r6, [sp, #328]
+ ldm r7, {r0, r1, r2, r5, r7}
+ adds r6, r6, r9
+ str r6, [r8, #4]
+ ldr r6, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #12] @ 4-byte Spill
+ adcs r0, r1, r11
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r10, #8]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r4, #0
+ ldr r4, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #288
+ mov r1, r4
+ bl .LmulPv288x32(PLT)
+ add r9, sp, #312
+ add lr, sp, #288
+ ldm r9, {r5, r6, r7, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r8, #8]
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ mov r1, r4
+ adcs r0, r2, r0
+ ldr r2, [r10, #12]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r9, #0
+ mov r9, r4
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #248
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #272
+ ldr r4, [sp, #284]
+ add r6, sp, #252
+ ldm lr, {r3, r12, lr}
+ ldr r7, [sp, #248]
+ ldr r5, [sp, #268]
+ ldm r6, {r0, r1, r2, r6}
+ adds r7, r7, r11
+ str r7, [r8, #12]
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r11, r0, r7
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r10, #16]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r4, #0
+ mov r4, r9
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #208
+ mov r1, r4
+ bl .LmulPv288x32(PLT)
+ add r9, sp, #232
+ add lr, sp, #208
+ ldm r9, {r5, r6, r7, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r0, r11
+ str r0, [r8, #16]
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ mov r1, r4
+ adcs r0, r2, r0
+ ldr r2, [r10, #20]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r9, #0
+ mov r9, r4
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #168
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #192
+ ldr r4, [sp, #204]
+ add r6, sp, #172
+ ldm lr, {r3, r12, lr}
+ ldr r7, [sp, #168]
+ ldr r5, [sp, #188]
+ ldm r6, {r0, r1, r2, r6}
+ adds r7, r7, r11
+ str r7, [r8, #20]
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r11, r0, r7
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r10, #24]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ adc r0, r4, #0
+ mov r4, r9
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #128
+ mov r1, r4
+ bl .LmulPv288x32(PLT)
+ add r9, sp, #152
+ add lr, sp, #128
+ ldm r9, {r5, r6, r7, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r0, r11
+ str r0, [r8, #24]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ mov r1, r4
+ adcs r0, r2, r0
+ ldr r2, [r10, #28]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #88
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #124]
+ add lr, sp, #112
+ add r7, sp, #92
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r5, r12, lr}
+ ldr r2, [sp, #88]
+ ldr r6, [sp, #108]
+ ldm r7, {r0, r1, r3, r7}
+ ldr r4, [sp, #40] @ 4-byte Reload
+ adds r2, r2, r11
+ adcs r9, r0, r4
+ ldr r0, [sp, #36] @ 4-byte Reload
+ str r2, [r8, #28]
+ ldr r2, [r10, #32]
+ adcs r10, r1, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r11, r3, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r7, r7, r0
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r6, r6, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r5, r5, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r4, r0, #0
+ add r0, sp, #48
+ bl .LmulPv288x32(PLT)
+ add r3, sp, #48
+ ldm r3, {r0, r1, r2, r3}
+ ldr r12, [sp, #84]
+ ldr lr, [sp, #80]
+ adds r0, r0, r9
+ ldr r9, [sp, #76]
+ adcs r1, r1, r10
+ adcs r2, r2, r11
+ ldr r11, [sp, #72]
+ adcs r10, r3, r7
+ ldr r7, [sp, #64]
+ ldr r3, [sp, #68]
+ str r0, [r8, #32]
+ str r1, [r8, #36]
+ str r2, [r8, #40]
+ str r10, [r8, #44]
+ adcs r0, r7, r6
+ str r0, [r8, #48]
+ adcs r0, r3, r5
+ str r0, [r8, #52]
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [r8, #56]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [r8, #60]
+ adcs r0, lr, r4
+ adc r1, r12, #0
+ str r0, [r8, #64]
+ str r1, [r8, #68]
+ add sp, sp, #412
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end128:
+ .size mcl_fpDbl_mulPre9L, .Lfunc_end128-mcl_fpDbl_mulPre9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre9L
+ .align 2
+ .type mcl_fpDbl_sqrPre9L,%function
+mcl_fpDbl_sqrPre9L: @ @mcl_fpDbl_sqrPre9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #412
+ sub sp, sp, #412
+ mov r5, r1
+ mov r4, r0
+ add r0, sp, #368
+ ldr r2, [r5]
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #404]
+ add r11, sp, #368
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #400]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #396]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #392]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #388]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #384]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r0, r10, r11}
+ ldr r1, [sp, #380]
+ ldr r2, [r5, #4]
+ str r1, [sp, #20] @ 4-byte Spill
+ str r0, [r4]
+ add r0, sp, #328
+ mov r1, r5
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #348
+ add r7, sp, #328
+ ldr r9, [sp, #364]
+ ldr r8, [sp, #360]
+ ldm lr, {r6, r12, lr}
+ ldm r7, {r0, r1, r2, r3, r7}
+ adds r0, r0, r10
+ str r0, [r4, #4]
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r10, r1, r11
+ mov r1, r5
+ adcs r11, r2, r0
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r2, [r5, #8]
+ adcs r0, r3, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #288
+ bl .LmulPv288x32(PLT)
+ add r9, sp, #312
+ add lr, sp, #288
+ ldm r9, {r6, r7, r8, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r0, r10
+ str r0, [r4, #8]
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r10, r1, r11
+ mov r1, r5
+ adcs r11, r2, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r2, [r5, #12]
+ adcs r0, r3, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #248
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #268
+ add r7, sp, #248
+ ldr r9, [sp, #284]
+ ldr r8, [sp, #280]
+ ldm lr, {r6, r12, lr}
+ ldm r7, {r0, r1, r2, r3, r7}
+ adds r0, r0, r10
+ str r0, [r4, #12]
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r10, r1, r11
+ mov r1, r5
+ adcs r11, r2, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r2, [r5, #16]
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #208
+ bl .LmulPv288x32(PLT)
+ add r9, sp, #232
+ add lr, sp, #208
+ ldm r9, {r6, r7, r8, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r0, r10
+ str r0, [r4, #16]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r10, r1, r11
+ mov r1, r5
+ adcs r11, r2, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r2, [r5, #20]
+ adcs r0, r3, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #168
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #188
+ add r7, sp, #168
+ ldr r9, [sp, #204]
+ ldr r8, [sp, #200]
+ ldm lr, {r6, r12, lr}
+ ldm r7, {r0, r1, r2, r3, r7}
+ adds r0, r0, r10
+ str r0, [r4, #20]
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r10, r1, r11
+ mov r1, r5
+ adcs r11, r2, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ ldr r2, [r5, #24]
+ adcs r0, r3, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #128
+ bl .LmulPv288x32(PLT)
+ add r9, sp, #152
+ add lr, sp, #128
+ ldm r9, {r6, r7, r8, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r0, r10
+ str r0, [r4, #24]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r10, r1, r11
+ mov r1, r5
+ adcs r11, r2, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r2, [r5, #28]
+ adcs r0, r3, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #88
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #124]
+ ldr r2, [sp, #88]
+ ldr r1, [sp, #92]
+ add r12, sp, #96
+ ldr lr, [sp, #116]
+ ldr r6, [sp, #112]
+ ldr r7, [sp, #108]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #120]
+ adds r2, r2, r10
+ adcs r10, r1, r11
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r12, {r0, r3, r12}
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r2, [r4, #28]
+ ldr r2, [r5, #32]
+ adcs r11, r0, r1
+ ldr r0, [sp, #40] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r8, r3, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r9, r12, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #48
+ bl .LmulPv288x32(PLT)
+ add r3, sp, #48
+ add lr, sp, #72
+ ldm r3, {r0, r1, r2, r3}
+ ldr r12, [sp, #84]
+ adds r0, r0, r10
+ adcs r1, r1, r11
+ adcs r2, r2, r8
+ ldm lr, {r5, r8, lr}
+ ldr r6, [sp, #68]
+ ldr r7, [sp, #64]
+ adcs r3, r3, r9
+ add r9, r4, #32
+ stm r9, {r0, r1, r2}
+ str r3, [r4, #44]
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [r4, #48]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [r4, #52]
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [r4, #56]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [r4, #60]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, lr, r0
+ adc r1, r12, #0
+ str r0, [r4, #64]
+ str r1, [r4, #68]
+ add sp, sp, #412
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end129:
+ .size mcl_fpDbl_sqrPre9L, .Lfunc_end129-mcl_fpDbl_sqrPre9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont9L
+ .align 2
+ .type mcl_fp_mont9L,%function
+mcl_fp_mont9L: @ @mcl_fp_mont9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #804
+ sub sp, sp, #804
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r6, [r3, #-4]
+ ldr r2, [r2]
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #760
+ str r3, [sp, #76] @ 4-byte Spill
+ str r1, [sp, #68] @ 4-byte Spill
+ mov r4, r3
+ mov r7, r1
+ str r6, [sp, #72] @ 4-byte Spill
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #764]
+ ldr r5, [sp, #760]
+ mov r1, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #768]
+ mul r2, r5, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #772]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #784]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #780]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #776]
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #720
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #756]
+ add r11, sp, #724
+ ldr r4, [sp, #736]
+ ldr r9, [sp, #720]
+ mov r1, r7
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #752]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #748]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #744]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #740]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r8, r10, r11}
+ ldr r6, [sp, #64] @ 4-byte Reload
+ add r0, sp, #680
+ ldr r2, [r6, #4]
+ bl .LmulPv288x32(PLT)
+ adds r0, r9, r5
+ ldr r2, [sp, #4] @ 4-byte Reload
+ mov r1, #0
+ add lr, sp, #680
+ ldr r9, [sp, #716]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r5, r8, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #712]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r11, r0
+ ldr r11, [sp, #708]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #704]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r7, r2, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ adc r8, r1, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r5, r5, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r8, r9
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r2, r5, r0
+ add r0, sp, #640
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #676]
+ add r10, sp, #640
+ ldr r11, [sp, #660]
+ ldr r7, [sp, #656]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #672]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #664]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldr r2, [r6, #8]
+ ldr r6, [sp, #68] @ 4-byte Reload
+ add r0, sp, #600
+ mov r1, r6
+ bl .LmulPv288x32(PLT)
+ adds r0, r5, r4
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #600
+ ldr r4, [sp, #624]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #636]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #632]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #628]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r8, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r5, r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r8, r9
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r2, r5, r0
+ add r0, sp, #560
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #596]
+ add r10, sp, #560
+ ldr r11, [sp, #580]
+ ldr r7, [sp, #576]
+ mov r1, r6
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #592]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #588]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #520
+ bl .LmulPv288x32(PLT)
+ adds r0, r5, r4
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #520
+ ldr r4, [sp, #544]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #556]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #552]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #548]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r8, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r5, r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r6, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r8, r9
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r2, r5, r0
+ add r0, sp, #480
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #516]
+ add r10, sp, #480
+ ldr r11, [sp, #500]
+ ldr r7, [sp, #496]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #512]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #504]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldr r6, [sp, #64] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ add r0, sp, #440
+ ldr r2, [r6, #16]
+ bl .LmulPv288x32(PLT)
+ adds r0, r5, r4
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #440
+ ldr r4, [sp, #464]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #476]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #472]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #468]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r8, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r5, r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r8, r9
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r2, r5, r0
+ add r0, sp, #400
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #436]
+ add r10, sp, #400
+ ldr r11, [sp, #420]
+ ldr r7, [sp, #416]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #432]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #428]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #424]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldr r2, [r6, #20]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ add r0, sp, #360
+ bl .LmulPv288x32(PLT)
+ adds r0, r5, r4
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #360
+ ldr r4, [sp, #384]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #396]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #392]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #388]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r8, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r5, r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r6, lr
+ ldr r6, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ mul r2, r5, r6
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r8, r9
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #320
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #356]
+ add r10, sp, #320
+ ldr r11, [sp, #340]
+ ldr r7, [sp, #336]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #352]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #348]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #344]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #280
+ bl .LmulPv288x32(PLT)
+ adds r0, r5, r4
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #280
+ ldr r4, [sp, #304]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #316]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #312]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #308]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r8, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r5, r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r5, r6
+ ldr r6, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ mov r1, r6
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r8, r9
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #240
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #276]
+ add r10, sp, #240
+ ldr r11, [sp, #260]
+ ldr r7, [sp, #256]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #268]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #264]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #200
+ bl .LmulPv288x32(PLT)
+ adds r0, r5, r4
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #200
+ ldr r4, [sp, #224]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #236]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #232]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #228]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r8, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r5, r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ adcs r0, r8, r9
+ str r0, [sp, #24] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r2, r5, r0
+ add r0, sp, #160
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #196]
+ add r10, sp, #160
+ ldr r11, [sp, #184]
+ ldr r6, [sp, #180]
+ ldr r7, [sp, #176]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #120
+ bl .LmulPv288x32(PLT)
+ adds r0, r5, r4
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r2, [sp, #120]
+ ldr lr, [sp, #124]
+ ldr r5, [sp, #128]
+ ldr r12, [sp, #132]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r4, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r9, r0, r9
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r10, r0, r10
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ add r7, sp, #136
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r11, r0, r11
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r4, r4, r2
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r9, r9, lr
+ adcs r10, r10, r5
+ mul r8, r4, r0
+ ldm r7, {r0, r1, r2, r3, r6, r7}
+ ldr r5, [sp, #68] @ 4-byte Reload
+ adcs r5, r5, r12
+ str r5, [sp, #36] @ 4-byte Spill
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs r5, r5, r0
+ adcs r0, r11, r1
+ ldr r11, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mov r1, r11
+ adcs r0, r0, r2
+ mov r2, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ add r0, sp, #80
+ bl .LmulPv288x32(PLT)
+ add r3, sp, #80
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r4, r0
+ adcs r0, r9, r1
+ ldr r1, [sp, #96]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r7, r10, r2
+ str r7, [sp, #40] @ 4-byte Spill
+ adcs r8, r0, r3
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r10, r5, r1
+ ldr r1, [sp, #100]
+ adcs r4, r0, r1
+ ldr r1, [sp, #104]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r4, [sp, #44] @ 4-byte Spill
+ adcs r6, r0, r1
+ ldr r1, [sp, #108]
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r6, [sp, #48] @ 4-byte Spill
+ adcs r12, r0, r1
+ ldr r1, [sp, #112]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ str r12, [sp, #56] @ 4-byte Spill
+ adcs lr, r0, r1
+ ldr r1, [sp, #116]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str lr, [sp, #68] @ 4-byte Spill
+ adcs r5, r0, r1
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r5, [sp, #72] @ 4-byte Spill
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, r11
+ ldmib r0, {r2, r3, r11}
+ ldr r1, [r0, #16]
+ ldr r9, [r0]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [r0, #20]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r0, #24]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [r0, #28]
+ str r1, [sp, #36] @ 4-byte Spill
+ mov r1, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ subs r9, r0, r9
+ sbcs r2, r7, r2
+ sbcs r3, r8, r3
+ sbcs r7, r10, r11
+ ldr r11, [r1, #32]
+ ldr r1, [sp, #24] @ 4-byte Reload
+ sbcs r1, r4, r1
+ ldr r4, [sp, #28] @ 4-byte Reload
+ sbcs r4, r6, r4
+ ldr r6, [sp, #32] @ 4-byte Reload
+ sbcs r12, r12, r6
+ ldr r6, [sp, #36] @ 4-byte Reload
+ sbcs lr, lr, r6
+ sbcs r11, r5, r11
+ ldr r5, [sp, #64] @ 4-byte Reload
+ sbc r6, r5, #0
+ ldr r5, [sp, #60] @ 4-byte Reload
+ ands r6, r6, #1
+ movne r9, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ movne r3, r8
+ str r9, [r5]
+ movne r2, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ cmp r6, #0
+ movne r7, r10
+ str r2, [r5, #4]
+ str r3, [r5, #8]
+ str r7, [r5, #12]
+ movne r1, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ str r1, [r5, #16]
+ movne r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ cmp r6, #0
+ str r4, [r5, #20]
+ movne r12, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r12, [r5, #24]
+ movne lr, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str lr, [r5, #28]
+ movne r11, r0
+ str r11, [r5, #32]
+ add sp, sp, #804
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end130:
+ .size mcl_fp_mont9L, .Lfunc_end130-mcl_fp_mont9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF9L
+ .align 2
+ .type mcl_fp_montNF9L,%function
+mcl_fp_montNF9L: @ @mcl_fp_montNF9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #804
+ sub sp, sp, #804
+ add r12, sp, #60
+ str r2, [sp, #72] @ 4-byte Spill
+ mov r4, r3
+ mov r7, r1
+ stm r12, {r0, r1, r3}
+ add r0, sp, #760
+ ldr r6, [r3, #-4]
+ ldr r2, [r2]
+ str r6, [sp, #76] @ 4-byte Spill
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #764]
+ ldr r5, [sp, #760]
+ mov r1, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #768]
+ mul r2, r5, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #772]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #784]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #780]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #776]
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #720
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #756]
+ add r10, sp, #724
+ ldr r6, [sp, #736]
+ ldr r11, [sp, #720]
+ mov r1, r7
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #752]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #748]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #744]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #740]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r4, [sp, #72] @ 4-byte Reload
+ add r0, sp, #680
+ ldr r2, [r4, #4]
+ bl .LmulPv288x32(PLT)
+ adds r0, r11, r5
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #680
+ ldr r11, [sp, #704]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #716]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #712]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #708]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r5, r1, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r7, r1, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r1, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r6, [sp, #48] @ 4-byte Reload
+ adds r6, r6, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r5, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r8, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #640
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #676]
+ add r10, sp, #644
+ ldr r7, [sp, #656]
+ ldr r11, [sp, #640]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #672]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #664]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #660]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r5, [sp, #64] @ 4-byte Reload
+ ldr r2, [r4, #8]
+ add r0, sp, #600
+ mov r1, r5
+ bl .LmulPv288x32(PLT)
+ adds r0, r6, r11
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #600
+ ldr r11, [sp, #624]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r4, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r8, [sp, #636]
+ adcs r0, r0, r9
+ ldr r9, [sp, #632]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #628]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r6, r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r8, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #560
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #596]
+ add r10, sp, #564
+ ldr r7, [sp, #576]
+ ldr r11, [sp, #560]
+ mov r1, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #592]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #588]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #520
+ bl .LmulPv288x32(PLT)
+ adds r0, r6, r11
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #520
+ ldr r11, [sp, #544]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r6, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r8, [sp, #556]
+ adcs r0, r0, r9
+ ldr r9, [sp, #552]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #548]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r5, r0, r1
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r6, r6, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r5, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r8, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #480
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #516]
+ add r10, sp, #484
+ ldr r7, [sp, #496]
+ ldr r11, [sp, #480]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #512]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #504]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #500]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r5, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #64] @ 4-byte Reload
+ add r0, sp, #440
+ ldr r2, [r5, #16]
+ bl .LmulPv288x32(PLT)
+ adds r0, r6, r11
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #440
+ ldr r11, [sp, #464]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r4, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r8, [sp, #476]
+ adcs r0, r0, r9
+ ldr r9, [sp, #472]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #468]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r6, r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r4, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r6, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r8, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #400
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #436]
+ add r10, sp, #404
+ ldr r7, [sp, #416]
+ ldr r11, [sp, #400]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #432]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #428]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #424]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #420]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r2, [r5, #20]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ add r0, sp, #360
+ bl .LmulPv288x32(PLT)
+ adds r0, r6, r11
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #360
+ ldr r11, [sp, #384]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r8, [sp, #396]
+ adcs r0, r0, r9
+ ldr r9, [sp, #392]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #388]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r6, r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r6, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r8, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #320
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #356]
+ add r10, sp, #324
+ ldr r7, [sp, #336]
+ ldr r11, [sp, #320]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #352]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #348]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #344]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #340]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r5, [sp, #64] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #280
+ mov r1, r5
+ bl .LmulPv288x32(PLT)
+ adds r0, r6, r11
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #280
+ ldr r11, [sp, #304]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r4, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r8, [sp, #316]
+ adcs r0, r0, r9
+ ldr r9, [sp, #312]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #308]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r6, r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r8, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #240
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #276]
+ add r10, sp, #244
+ ldr r7, [sp, #256]
+ ldr r11, [sp, #240]
+ mov r1, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #268]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #264]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #200
+ bl .LmulPv288x32(PLT)
+ adds r0, r6, r11
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #200
+ ldr r11, [sp, #224]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r5, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r8, [sp, #236]
+ adcs r0, r0, r9
+ ldr r9, [sp, #232]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #228]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r5, r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r6, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ adcs r0, r7, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r8, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ mul r2, r5, r0
+ add r0, sp, #160
+ bl .LmulPv288x32(PLT)
+ ldr r0, [sp, #196]
+ add r10, sp, #164
+ ldr r4, [sp, #184]
+ ldr r6, [sp, #180]
+ ldr r7, [sp, #176]
+ ldr r11, [sp, #160]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #64] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #120
+ bl .LmulPv288x32(PLT)
+ adds r0, r5, r11
+ ldr r1, [sp, #52] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #120
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #136
+ adcs r1, r1, r9
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r10, r1, r10
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r11, r1, r7
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adc r1, r1, r2
+ str r1, [sp, #44] @ 4-byte Spill
+ ldm lr, {r2, r12, lr}
+ ldr r4, [sp, #132]
+ adds r5, r0, r2
+ ldr r0, [sp, #76] @ 4-byte Reload
+ mul r9, r5, r0
+ ldm r8, {r0, r1, r2, r3, r6, r8}
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adcs r7, r7, r12
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r7, r10, lr
+ ldr r10, [sp, #68] @ 4-byte Reload
+ adcs r11, r11, r4
+ ldr r4, [sp, #72] @ 4-byte Reload
+ str r7, [sp, #36] @ 4-byte Spill
+ adcs r0, r4, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ mov r2, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r4, r0, r3
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ adc r0, r8, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, sp, #80
+ bl .LmulPv288x32(PLT)
+ add r3, sp, #80
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r5, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r9, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #96]
+ str r9, [sp, #32] @ 4-byte Spill
+ adcs r2, r0, r2
+ adcs r0, r11, r3
+ str r2, [sp, #44] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r1, [sp, #100]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r7, [sp, #48] @ 4-byte Spill
+ adcs r6, r0, r1
+ ldr r1, [sp, #104]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs lr, r0, r1
+ ldr r1, [sp, #108]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str lr, [sp, #56] @ 4-byte Spill
+ adcs r4, r4, r1
+ ldr r1, [sp, #112]
+ str r4, [sp, #64] @ 4-byte Spill
+ adcs r5, r0, r1
+ ldr r1, [sp, #116]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r5, [sp, #76] @ 4-byte Spill
+ adc r12, r0, r1
+ mov r0, r10
+ ldr r1, [r0, #16]
+ ldr r8, [r0]
+ ldr r11, [r0, #4]
+ ldr r10, [r0, #8]
+ ldr r3, [r0, #12]
+ str r12, [sp, #72] @ 4-byte Spill
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [r0, #20]
+ subs r8, r9, r8
+ ldr r9, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r0, #24]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [r0, #28]
+ ldr r0, [r0, #32]
+ str r1, [sp, #40] @ 4-byte Spill
+ sbcs r1, r2, r11
+ sbcs r2, r9, r10
+ mov r10, r6
+ sbcs r3, r7, r3
+ ldr r7, [sp, #24] @ 4-byte Reload
+ sbcs r7, r6, r7
+ ldr r6, [sp, #28] @ 4-byte Reload
+ sbcs r11, lr, r6
+ ldr r6, [sp, #36] @ 4-byte Reload
+ sbcs lr, r4, r6
+ ldr r4, [sp, #40] @ 4-byte Reload
+ ldr r6, [sp, #44] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #32] @ 4-byte Reload
+ sbc r0, r12, r0
+ asr r12, r0, #31
+ cmp r12, #0
+ movlt r8, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ movlt r1, r6
+ movlt r2, r9
+ cmp r12, #0
+ movlt r7, r10
+ str r8, [r5]
+ str r1, [r5, #4]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r2, [r5, #8]
+ movlt r3, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r3, [r5, #12]
+ str r7, [r5, #16]
+ movlt r11, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ cmp r12, #0
+ str r11, [r5, #20]
+ movlt lr, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str lr, [r5, #24]
+ movlt r4, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r4, [r5, #28]
+ movlt r0, r1
+ str r0, [r5, #32]
+ add sp, sp, #804
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end131:
+ .size mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed9L
+ .align 2
+ .type mcl_fp_montRed9L,%function
+mcl_fp_montRed9L: @ @mcl_fp_montRed9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #476
+ sub sp, sp, #476
+ mov r5, r2
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r4, [r1]
+ ldr r11, [r1, #32]
+ ldr r10, [r1, #36]
+ ldr r0, [r5]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r5, #4]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r5, #8]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #16]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r5, #12]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r1, #20]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r5, #16]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #24]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r5, #20]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #28]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r5, #24]
+ str r2, [sp, #44] @ 4-byte Spill
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r5, #-4]
+ str r0, [sp, #108] @ 4-byte Spill
+ mul r2, r4, r0
+ ldr r0, [r5, #28]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r5, #32]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ mov r1, r5
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #432
+ bl .LmulPv288x32(PLT)
+ ldr r1, [sp, #432]
+ add lr, sp, #436
+ ldr r9, [sp, #468]
+ ldr r8, [sp, #464]
+ ldm lr, {r0, r2, r3, r6, r7, r12, lr}
+ adds r1, r4, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r4, r1, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ mul r2, r4, r7
+ adcs r0, r0, r12
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r11, r8
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r9, r10, r9
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #4] @ 4-byte Spill
+ add r0, sp, #392
+ bl .LmulPv288x32(PLT)
+ add r11, sp, #408
+ add r6, sp, #392
+ ldr r12, [sp, #428]
+ ldr lr, [sp, #424]
+ ldr r8, [sp, #420]
+ ldm r11, {r2, r10, r11}
+ ldm r6, {r0, r1, r3, r6}
+ adds r0, r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r4, r0, r1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r4, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r11, r0, r11
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r0, r9, lr
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #352
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #372
+ add r7, sp, #352
+ ldr r10, [sp, #388]
+ ldr r9, [sp, #384]
+ ldm lr, {r6, r12, lr}
+ ldm r7, {r0, r1, r2, r3, r7}
+ adds r0, r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r4, r0, r1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r2
+ mul r2, r4, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r11, r6
+ mov r11, r8
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #312
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #332
+ ldr r7, [sp, #348]
+ add r9, sp, #320
+ ldm lr, {r6, r8, r12, lr}
+ ldr r1, [sp, #312]
+ ldr r3, [sp, #316]
+ ldm r9, {r0, r2, r9}
+ adds r1, r4, r1
+ mov r4, r11
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r10, r1, r3
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #272
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #272
+ ldr r11, [sp, #308]
+ ldr r9, [sp, #304]
+ ldm lr, {r0, r1, r2, r3, r6, r7, r12, lr}
+ adds r0, r10, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r2
+ mul r2, r8, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ mov r6, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #232
+ bl .LmulPv288x32(PLT)
+ add r11, sp, #256
+ add lr, sp, #232
+ ldm r11, {r7, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r8, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r4, r0, r1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r2
+ mul r2, r4, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #192
+ bl .LmulPv288x32(PLT)
+ add lr, sp, #212
+ add r7, sp, #192
+ ldr r9, [sp, #228]
+ ldr r8, [sp, #224]
+ ldm lr, {r6, r12, lr}
+ ldm r7, {r0, r1, r2, r3, r7}
+ adds r0, r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r4, r0, r1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r1, r5
+ adcs r10, r0, r2
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r11, r0, r3
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r7, r0, r7
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r6, r0, r6
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ mul r2, r4, r8
+ adcs r9, r0, r9
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #152
+ bl .LmulPv288x32(PLT)
+ add r12, sp, #152
+ ldm r12, {r0, r1, r3, r12}
+ ldr lr, [sp, #188]
+ adds r0, r4, r0
+ adcs r4, r10, r1
+ ldr r1, [sp, #168]
+ adcs r11, r11, r3
+ mul r2, r4, r8
+ ldr r3, [sp, #180]
+ adcs r0, r7, r12
+ ldr r7, [sp, #176]
+ ldr r12, [sp, #184]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #172]
+ adcs r10, r6, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r8, r1, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ mov r1, r5
+ adcs r7, r0, r7
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r9, r12
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #112
+ bl .LmulPv288x32(PLT)
+ add r3, sp, #112
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r4, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r6, r11, r1
+ ldr r1, [sp, #128]
+ adcs r9, r0, r2
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r11, r10, r3
+ adcs lr, r8, r1
+ ldr r1, [sp, #132]
+ str r11, [sp, #28] @ 4-byte Spill
+ str lr, [sp, #32] @ 4-byte Spill
+ adcs r7, r7, r1
+ ldr r1, [sp, #136]
+ str r7, [sp, #44] @ 4-byte Spill
+ adcs r8, r0, r1
+ ldr r1, [sp, #140]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ str r8, [sp, #48] @ 4-byte Spill
+ adcs r4, r0, r1
+ ldr r1, [sp, #144]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r4, [sp, #52] @ 4-byte Spill
+ adcs r5, r0, r1
+ ldr r1, [sp, #148]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r5, [sp, #108] @ 4-byte Spill
+ adcs r12, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r10, r0, #0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ subs r2, r6, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ sbcs r3, r9, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ sbcs r1, r11, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ sbcs r11, lr, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ sbcs r0, r7, r0
+ ldr r7, [sp, #84] @ 4-byte Reload
+ sbcs lr, r8, r7
+ ldr r7, [sp, #88] @ 4-byte Reload
+ sbcs r8, r4, r7
+ ldr r4, [sp, #68] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #72] @ 4-byte Reload
+ sbcs r5, r12, r5
+ sbc r7, r10, #0
+ ands r7, r7, #1
+ movne r2, r6
+ ldr r6, [sp, #104] @ 4-byte Reload
+ movne r3, r9
+ str r2, [r6]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r3, [r6, #4]
+ movne r1, r2
+ cmp r7, #0
+ str r1, [r6, #8]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ movne r11, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r11, [r6, #12]
+ movne r0, r1
+ str r0, [r6, #16]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ movne lr, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ cmp r7, #0
+ movne r5, r12
+ str lr, [r6, #20]
+ movne r8, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ str r8, [r6, #24]
+ movne r4, r0
+ str r4, [r6, #28]
+ str r5, [r6, #32]
+ add sp, sp, #476
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end132:
+ .size mcl_fp_montRed9L, .Lfunc_end132-mcl_fp_montRed9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre9L
+ .align 2
+ .type mcl_fp_addPre9L,%function
+mcl_fp_addPre9L: @ @mcl_fp_addPre9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldm r1, {r3, r12, lr}
+ ldr r9, [r1, #12]
+ ldmib r2, {r5, r6, r7}
+ ldr r4, [r2, #16]
+ ldr r8, [r2]
+ ldr r11, [r2, #28]
+ str r4, [sp, #12] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ adds r10, r8, r3
+ adcs r5, r5, r12
+ ldr r12, [r1, #32]
+ ldr r8, [sp, #12] @ 4-byte Reload
+ str r10, [r0]
+ adcs lr, r6, lr
+ ldr r6, [r1, #20]
+ adcs r7, r7, r9
+ str r4, [sp, #4] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ ldr r2, [r2, #32]
+ ldr r3, [sp, #4] @ 4-byte Reload
+ str r4, [sp] @ 4-byte Spill
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #28]
+ ldr r4, [r1, #24]
+ ldr r1, [r1, #16]
+ adcs r1, r8, r1
+ adcs r6, r3, r6
+ ldr r3, [sp] @ 4-byte Reload
+ stmib r0, {r5, lr}
+ str r7, [r0, #12]
+ str r1, [r0, #16]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r6, [r0, #20]
+ adcs r4, r3, r4
+ adcs r2, r11, r2
+ str r4, [r0, #24]
+ adcs r1, r1, r12
+ str r2, [r0, #28]
+ str r1, [r0, #32]
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end133:
+ .size mcl_fp_addPre9L, .Lfunc_end133-mcl_fp_addPre9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subPre9L
+ .align 2
+ .type mcl_fp_subPre9L,%function
+mcl_fp_subPre9L: @ @mcl_fp_subPre9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #20
+ sub sp, sp, #20
+ ldr r3, [r2, #8]
+ add lr, r1, #16
+ ldr r11, [r2, #4]
+ ldr r10, [r2, #12]
+ ldr r4, [r2]
+ str r3, [sp] @ 4-byte Spill
+ ldr r3, [r2, #16]
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [r2, #20]
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [r2, #24]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ str r3, [sp, #16] @ 4-byte Spill
+ ldmib r1, {r5, r6, r7}
+ ldm lr, {r3, r12, lr}
+ ldr r9, [r1]
+ ldr r8, [r1, #28]
+ subs r4, r9, r4
+ ldr r9, [r2, #32]
+ ldr r2, [sp] @ 4-byte Reload
+ sbcs r11, r5, r11
+ ldr r5, [sp, #16] @ 4-byte Reload
+ sbcs r6, r6, r2
+ sbcs r7, r7, r10
+ ldr r10, [r1, #32]
+ ldr r1, [sp, #4] @ 4-byte Reload
+ sbcs r3, r3, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ sbcs r2, r12, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ stm r0, {r4, r11}
+ str r6, [r0, #8]
+ str r7, [r0, #12]
+ str r3, [r0, #16]
+ str r2, [r0, #20]
+ sbcs r1, lr, r1
+ sbcs r5, r8, r5
+ str r1, [r0, #24]
+ sbcs r1, r10, r9
+ str r5, [r0, #28]
+ str r1, [r0, #32]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #20
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end134:
+ .size mcl_fp_subPre9L, .Lfunc_end134-mcl_fp_subPre9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_9L
+ .align 2
+ .type mcl_fp_shr1_9L,%function
+mcl_fp_shr1_9L: @ @mcl_fp_shr1_9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, lr}
+ push {r4, r5, r6, r7, r8, lr}
+ add r12, r1, #16
+ ldr r2, [r1, #8]
+ ldr lr, [r1, #12]
+ ldm r12, {r4, r5, r6, r8, r12}
+ ldm r1, {r1, r3}
+ lsrs r7, r3, #1
+ rrx r1, r1
+ str r1, [r0]
+ lsr r1, r3, #1
+ orr r1, r1, r2, lsl #31
+ str r1, [r0, #4]
+ lsrs r1, lr, #1
+ rrx r1, r2
+ str r1, [r0, #8]
+ lsr r1, lr, #1
+ orr r1, r1, r4, lsl #31
+ str r1, [r0, #12]
+ lsrs r1, r5, #1
+ rrx r1, r4
+ str r1, [r0, #16]
+ lsr r1, r5, #1
+ orr r1, r1, r6, lsl #31
+ str r1, [r0, #20]
+ lsrs r1, r8, #1
+ rrx r1, r6
+ str r1, [r0, #24]
+ lsr r1, r8, #1
+ orr r1, r1, r12, lsl #31
+ str r1, [r0, #28]
+ lsr r1, r12, #1
+ str r1, [r0, #32]
+ pop {r4, r5, r6, r7, r8, lr}
+ mov pc, lr
+.Lfunc_end135:
+ .size mcl_fp_shr1_9L, .Lfunc_end135-mcl_fp_shr1_9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add9L
+ .align 2
+ .type mcl_fp_add9L,%function
+mcl_fp_add9L: @ @mcl_fp_add9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldm r1, {r12, lr}
+ ldr r5, [r2]
+ ldr r9, [r1, #8]
+ ldr r8, [r1, #12]
+ ldmib r2, {r4, r6, r7}
+ adds r12, r5, r12
+ ldr r5, [r1, #24]
+ adcs lr, r4, lr
+ ldr r4, [r1, #20]
+ str r12, [sp, #8] @ 4-byte Spill
+ adcs r10, r6, r9
+ ldr r6, [r1, #16]
+ adcs r9, r7, r8
+ ldr r7, [r2, #16]
+ str r10, [sp, #4] @ 4-byte Spill
+ adcs r6, r7, r6
+ ldr r7, [r2, #20]
+ adcs r7, r7, r4
+ ldr r4, [r2, #24]
+ adcs r11, r4, r5
+ ldr r5, [r1, #28]
+ ldr r4, [r2, #28]
+ ldr r1, [r1, #32]
+ ldr r2, [r2, #32]
+ adcs r8, r4, r5
+ adcs r4, r2, r1
+ mov r2, lr
+ add r1, r0, #16
+ str r4, [r0, #32]
+ str r12, [r0]
+ stmib r0, {r2, r10}
+ str r9, [r0, #12]
+ stm r1, {r6, r7, r11}
+ mov r1, #0
+ str r8, [r0, #28]
+ adc r1, r1, #0
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r3, {r1, r5, lr}
+ ldr r10, [sp, #8] @ 4-byte Reload
+ ldr r12, [r3, #12]
+ subs r1, r10, r1
+ str r1, [sp, #8] @ 4-byte Spill
+ sbcs r1, r2, r5
+ ldr r5, [r3, #20]
+ str r1, [sp] @ 4-byte Spill
+ ldr r1, [sp, #4] @ 4-byte Reload
+ sbcs r2, r1, lr
+ ldr r1, [r3, #16]
+ sbcs r12, r9, r12
+ sbcs r1, r6, r1
+ ldr r6, [r3, #24]
+ sbcs r5, r7, r5
+ ldr r7, [r3, #28]
+ ldr r3, [r3, #32]
+ sbcs r6, r11, r6
+ sbcs r7, r8, r7
+ sbcs r3, r4, r3
+ ldr r4, [sp, #12] @ 4-byte Reload
+ sbc r4, r4, #0
+ tst r4, #1
+ bne .LBB136_2
+@ BB#1: @ %nocarry
+ str r3, [r0, #32]
+ ldr r3, [sp, #8] @ 4-byte Reload
+ str r3, [r0]
+ ldr r3, [sp] @ 4-byte Reload
+ str r3, [r0, #4]
+ str r2, [r0, #8]
+ str r12, [r0, #12]
+ add r0, r0, #16
+ stm r0, {r1, r5, r6, r7}
+.LBB136_2: @ %carry
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end136:
+ .size mcl_fp_add9L, .Lfunc_end136-mcl_fp_add9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF9L
+ .align 2
+ .type mcl_fp_addNF9L,%function
+mcl_fp_addNF9L: @ @mcl_fp_addNF9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #52
+ sub sp, sp, #52
+ ldr r9, [r1]
+ ldmib r1, {r8, lr}
+ ldr r5, [r2]
+ ldr r12, [r1, #12]
+ ldmib r2, {r4, r6, r7}
+ ldr r10, [r3, #4]
+ adds r5, r5, r9
+ adcs r9, r4, r8
+ ldr r4, [r1, #16]
+ ldr r8, [r1, #20]
+ str r5, [sp, #16] @ 4-byte Spill
+ ldr r5, [r1, #24]
+ adcs r11, r6, lr
+ ldr lr, [sp, #16] @ 4-byte Reload
+ str r9, [sp, #28] @ 4-byte Spill
+ adcs r12, r7, r12
+ ldr r7, [r2, #16]
+ str r12, [sp, #32] @ 4-byte Spill
+ adcs r6, r7, r4
+ ldr r7, [r2, #20]
+ str r6, [sp, #36] @ 4-byte Spill
+ adcs r4, r7, r8
+ ldr r7, [r2, #24]
+ ldr r8, [r3]
+ str r4, [sp, #40] @ 4-byte Spill
+ adcs r7, r7, r5
+ ldr r5, [r2, #28]
+ ldr r2, [r2, #32]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r1, #28]
+ ldr r1, [r1, #32]
+ adcs r7, r5, r7
+ ldr r5, [r3, #8]
+ adc r1, r2, r1
+ ldr r2, [r3, #16]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r3, #12]
+ subs r8, lr, r8
+ str r1, [sp, #24] @ 4-byte Spill
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r3, #20]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r3, #24]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r3, #28]
+ ldr r3, [r3, #32]
+ str r3, [sp] @ 4-byte Spill
+ ldr r3, [sp, #4] @ 4-byte Reload
+ str r2, [sp, #20] @ 4-byte Spill
+ sbcs r2, r9, r10
+ sbcs r5, r11, r5
+ sbcs r7, r12, r7
+ sbcs r12, r6, r3
+ ldr r3, [sp, #8] @ 4-byte Reload
+ sbcs r6, r4, r3
+ ldr r4, [sp, #48] @ 4-byte Reload
+ ldr r3, [sp, #12] @ 4-byte Reload
+ sbcs r9, r4, r3
+ ldr r3, [sp, #44] @ 4-byte Reload
+ ldr r4, [sp, #20] @ 4-byte Reload
+ sbcs r10, r3, r4
+ ldr r3, [sp] @ 4-byte Reload
+ ldr r4, [sp, #28] @ 4-byte Reload
+ sbc r3, r1, r3
+ asr r1, r3, #31
+ cmp r1, #0
+ movlt r8, lr
+ movlt r2, r4
+ movlt r5, r11
+ cmp r1, #0
+ str r8, [r0]
+ str r2, [r0, #4]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r5, [r0, #8]
+ movlt r7, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r7, [r0, #12]
+ movlt r12, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r12, [r0, #16]
+ movlt r6, r2
+ cmp r1, #0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r6, [r0, #20]
+ movlt r9, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r9, [r0, #24]
+ movlt r10, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r10, [r0, #28]
+ movlt r3, r1
+ str r3, [r0, #32]
+ add sp, sp, #52
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end137:
+ .size mcl_fp_addNF9L, .Lfunc_end137-mcl_fp_addNF9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sub9L
+ .align 2
+ .type mcl_fp_sub9L,%function
+mcl_fp_sub9L: @ @mcl_fp_sub9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #24
+ sub sp, sp, #24
+ ldm r2, {r12, lr}
+ ldr r5, [r1]
+ ldr r8, [r2, #8]
+ ldr r9, [r2, #12]
+ ldmib r1, {r4, r6, r7}
+ subs r12, r5, r12
+ ldr r5, [r2, #24]
+ sbcs lr, r4, lr
+ ldr r4, [r2, #20]
+ sbcs r8, r6, r8
+ ldr r6, [r2, #16]
+ sbcs r9, r7, r9
+ ldr r7, [r1, #16]
+ sbcs r10, r7, r6
+ ldr r7, [r1, #20]
+ ldr r6, [r1, #28]
+ sbcs r7, r7, r4
+ ldr r4, [r1, #24]
+ ldr r1, [r1, #32]
+ sbcs r4, r4, r5
+ ldr r5, [r2, #28]
+ ldr r2, [r2, #32]
+ sbcs r5, r6, r5
+ sbcs r1, r1, r2
+ add r2, r0, #8
+ str r1, [r0, #32]
+ stm r0, {r12, lr}
+ stm r2, {r8, r9, r10}
+ mov r2, #0
+ str r7, [r0, #20]
+ str r4, [r0, #24]
+ str r5, [r0, #28]
+ sbc r2, r2, #0
+ tst r2, #1
+ beq .LBB138_2
+@ BB#1: @ %carry
+ ldr r2, [r3, #32]
+ ldr r6, [r3, #4]
+ ldr r11, [r3, #12]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r3, #8]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r3, #16]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r3, #20]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r3, #24]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r3, #28]
+ ldr r3, [r3]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [sp] @ 4-byte Reload
+ adds r3, r3, r12
+ adcs r6, r6, lr
+ adcs r8, r2, r8
+ ldr r2, [sp, #4] @ 4-byte Reload
+ adcs r12, r11, r9
+ adcs lr, r2, r10
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r7, r2, r7
+ ldr r2, [sp, #12] @ 4-byte Reload
+ adcs r4, r2, r4
+ ldr r2, [sp, #16] @ 4-byte Reload
+ stm r0, {r3, r6, r8, r12, lr}
+ str r7, [r0, #20]
+ str r4, [r0, #24]
+ adcs r5, r2, r5
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r5, [r0, #28]
+ adc r1, r2, r1
+ str r1, [r0, #32]
+.LBB138_2: @ %nocarry
+ add sp, sp, #24
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end138:
+ .size mcl_fp_sub9L, .Lfunc_end138-mcl_fp_sub9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF9L
+ .align 2
+ .type mcl_fp_subNF9L,%function
+mcl_fp_subNF9L: @ @mcl_fp_subNF9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #52
+ sub sp, sp, #52
+ ldr r7, [r2, #32]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldm r2, {r6, r8}
+ ldr r7, [r2, #8]
+ ldr r5, [r2, #16]
+ ldr r4, [r1, #16]
+ ldr r11, [r1, #20]
+ ldr r10, [r1, #24]
+ ldr r9, [r1, #28]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #12]
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ ldr r2, [r2, #28]
+ str r7, [sp, #24] @ 4-byte Spill
+ str r2, [sp, #28] @ 4-byte Spill
+ ldm r1, {r1, r2, r12, lr}
+ subs r6, r1, r6
+ ldr r1, [sp, #36] @ 4-byte Reload
+ sbcs r7, r2, r8
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r6, [sp, #12] @ 4-byte Spill
+ str r7, [sp, #16] @ 4-byte Spill
+ sbcs r8, r12, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r8, [sp, #20] @ 4-byte Spill
+ sbcs r12, lr, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r5, r4, r5
+ str r12, [sp, #32] @ 4-byte Spill
+ str r5, [sp, #36] @ 4-byte Spill
+ sbcs lr, r11, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ ldr r11, [r3, #16]
+ str lr, [sp, #40] @ 4-byte Spill
+ sbcs r4, r10, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ ldr r10, [r3, #20]
+ str r4, [sp, #24] @ 4-byte Spill
+ sbcs r9, r9, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbc r1, r2, r1
+ ldr r2, [r3, #24]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [r3, #32]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [r3, #4]
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [r3, #8]
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [r3, #12]
+ str r1, [sp] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ ldr r3, [r3]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adds r3, r6, r3
+ adcs r6, r7, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r7, r8, r1
+ ldr r1, [sp] @ 4-byte Reload
+ adcs r1, r12, r1
+ adcs r12, r5, r11
+ adcs r5, lr, r10
+ ldr r10, [sp, #12] @ 4-byte Reload
+ adcs lr, r4, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ ldr r4, [sp, #48] @ 4-byte Reload
+ adcs r8, r9, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adc r11, r4, r2
+ asr r2, r4, #31
+ cmp r2, #0
+ movge r3, r10
+ str r3, [r0]
+ ldr r3, [sp, #16] @ 4-byte Reload
+ movge r6, r3
+ ldr r3, [sp, #20] @ 4-byte Reload
+ str r6, [r0, #4]
+ movge r7, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ cmp r2, #0
+ str r7, [r0, #8]
+ movge r1, r3
+ str r1, [r0, #12]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ movge r12, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r12, [r0, #16]
+ movge r5, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ cmp r2, #0
+ movge r8, r9
+ movge r11, r4
+ str r5, [r0, #20]
+ movge lr, r1
+ str lr, [r0, #24]
+ str r8, [r0, #28]
+ str r11, [r0, #32]
+ add sp, sp, #52
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end139:
+ .size mcl_fp_subNF9L, .Lfunc_end139-mcl_fp_subNF9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add9L
+ .align 2
+ .type mcl_fpDbl_add9L,%function
+mcl_fpDbl_add9L: @ @mcl_fpDbl_add9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #88
+ sub sp, sp, #88
+ ldm r1, {r7, r9}
+ ldr r8, [r1, #8]
+ ldr lr, [r1, #12]
+ ldm r2, {r4, r5, r6, r12}
+ add r11, r2, #16
+ adds r4, r4, r7
+ ldr r7, [r2, #28]
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r2, #64]
+ str r7, [sp, #24] @ 4-byte Spill
+ str r4, [sp, #76] @ 4-byte Spill
+ ldr r4, [r2, #68]
+ str r4, [sp, #80] @ 4-byte Spill
+ adcs r4, r5, r9
+ str r4, [sp, #32] @ 4-byte Spill
+ adcs r4, r6, r8
+ str r4, [sp, #28] @ 4-byte Spill
+ ldr r4, [r2, #32]
+ adcs r9, r12, lr
+ add lr, r1, #16
+ str r4, [sp, #48] @ 4-byte Spill
+ ldr r4, [r2, #36]
+ str r4, [sp, #52] @ 4-byte Spill
+ ldr r4, [r2, #40]
+ str r4, [sp, #56] @ 4-byte Spill
+ ldr r4, [r2, #44]
+ str r4, [sp, #60] @ 4-byte Spill
+ ldr r4, [r2, #48]
+ str r4, [sp, #64] @ 4-byte Spill
+ ldr r4, [r2, #52]
+ str r4, [sp, #68] @ 4-byte Spill
+ ldr r4, [r2, #56]
+ str r4, [sp, #72] @ 4-byte Spill
+ ldr r4, [r2, #60]
+ str r4, [sp, #84] @ 4-byte Spill
+ ldm r11, {r5, r6, r11}
+ ldr r2, [r1, #64]
+ ldr r8, [r1, #32]
+ ldr r4, [r1, #36]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r10, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r10, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r1, r5, r1
+ adcs r2, r6, r2
+ str r7, [r0, #8]
+ str r9, [r0, #12]
+ str r1, [r0, #16]
+ str r2, [r0, #20]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r1, r11, r12
+ str r1, [r0, #24]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r2, r2, lr
+ str r2, [r0, #28]
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [r0, #32]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r4, r2, r4
+ ldr r2, [sp] @ 4-byte Reload
+ adcs r5, r1, r2
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r5, [sp, #56] @ 4-byte Spill
+ adcs lr, r1, r2
+ ldr r1, [sp, #64] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str lr, [sp, #60] @ 4-byte Spill
+ adcs r12, r1, r2
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r12, [sp, #64] @ 4-byte Spill
+ adcs r7, r1, r2
+ ldr r1, [sp, #72] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r7, [sp, #68] @ 4-byte Spill
+ adcs r8, r1, r2
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r8, [sp, #72] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r6, r1, r2
+ ldr r1, [sp, #80] @ 4-byte Reload
+ ldr r2, [sp, #44] @ 4-byte Reload
+ adcs r9, r1, r2
+ mov r2, #0
+ adc r1, r2, #0
+ str r9, [sp, #76] @ 4-byte Spill
+ str r1, [sp, #80] @ 4-byte Spill
+ ldmib r3, {r2, r11}
+ ldr r1, [r3, #12]
+ ldr r10, [r3]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ subs r10, r4, r10
+ sbcs r2, r5, r2
+ sbcs r11, lr, r11
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ ldr r5, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ ldr r3, [r3, #32]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ sbcs r1, r12, r1
+ sbcs r12, r7, r5
+ ldr r7, [sp, #44] @ 4-byte Reload
+ ldr r5, [sp, #84] @ 4-byte Reload
+ sbcs lr, r8, r7
+ ldr r7, [sp, #48] @ 4-byte Reload
+ mov r8, r6
+ sbcs r7, r5, r7
+ ldr r5, [sp, #52] @ 4-byte Reload
+ sbcs r5, r6, r5
+ sbcs r6, r9, r3
+ ldr r3, [sp, #80] @ 4-byte Reload
+ sbc r9, r3, #0
+ ldr r3, [sp, #56] @ 4-byte Reload
+ ands r9, r9, #1
+ movne r10, r4
+ str r10, [r0, #36]
+ movne r2, r3
+ str r2, [r0, #40]
+ ldr r2, [sp, #60] @ 4-byte Reload
+ movne r11, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ cmp r9, #0
+ str r11, [r0, #44]
+ movne r1, r2
+ str r1, [r0, #48]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ movne r12, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r12, [r0, #52]
+ movne lr, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ cmp r9, #0
+ movne r5, r8
+ str lr, [r0, #56]
+ movne r7, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r7, [r0, #60]
+ str r5, [r0, #64]
+ movne r6, r1
+ str r6, [r0, #68]
+ add sp, sp, #88
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end140:
+ .size mcl_fpDbl_add9L, .Lfunc_end140-mcl_fpDbl_add9L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub9L
+ .align 2
+ .type mcl_fpDbl_sub9L,%function
+mcl_fpDbl_sub9L: @ @mcl_fpDbl_sub9L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #80
+ sub sp, sp, #80
+ ldr r7, [r2, #64]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldm r2, {r5, r6, r7, r8}
+ ldr r4, [r2, #16]
+ ldr r10, [r2, #24]
+ str r4, [sp, #20] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ ldr r2, [r2, #28]
+ str r4, [sp, #24] @ 4-byte Spill
+ str r2, [sp, #32] @ 4-byte Spill
+ ldm r1, {r2, r12, lr}
+ ldr r4, [r1, #12]
+ ldr r11, [r1, #60]
+ subs r9, r2, r5
+ ldr r2, [r1, #64]
+ sbcs r5, r12, r6
+ sbcs r6, lr, r7
+ add lr, r1, #16
+ ldr r7, [r1, #36]
+ sbcs r4, r4, r8
+ ldr r8, [r1, #32]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ str r9, [r0]
+ stmib r0, {r5, r6}
+ str r4, [r0, #12]
+ ldr r5, [sp, #20] @ 4-byte Reload
+ ldr r4, [sp, #24] @ 4-byte Reload
+ sbcs r1, r1, r5
+ ldr r5, [sp, #16] @ 4-byte Reload
+ sbcs r2, r2, r4
+ str r1, [r0, #16]
+ str r2, [r0, #20]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ sbcs r1, r12, r10
+ str r1, [r0, #24]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r2, lr, r2
+ str r2, [r0, #28]
+ ldr r2, [sp, #44] @ 4-byte Reload
+ sbcs r1, r8, r1
+ str r1, [r0, #32]
+ sbcs r1, r7, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ ldr r7, [sp] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ sbcs r4, r7, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ sbcs r9, r7, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ ldr r7, [sp, #8] @ 4-byte Reload
+ sbcs r12, r7, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r12, [sp, #56] @ 4-byte Spill
+ sbcs lr, r7, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ ldr r7, [sp, #36] @ 4-byte Reload
+ str lr, [sp, #60] @ 4-byte Spill
+ sbcs r10, r5, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ ldr r5, [sp, #28] @ 4-byte Reload
+ str r10, [sp, #64] @ 4-byte Spill
+ sbcs r6, r11, r2
+ ldr r2, [sp, #72] @ 4-byte Reload
+ str r6, [sp, #68] @ 4-byte Spill
+ sbcs r8, r7, r2
+ ldr r2, [sp, #76] @ 4-byte Reload
+ str r8, [sp, #44] @ 4-byte Spill
+ sbcs r11, r5, r2
+ mov r2, #0
+ sbc r2, r2, #0
+ str r11, [sp, #76] @ 4-byte Spill
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r2, [r3, #32]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldmib r3, {r5, r7}
+ ldr r2, [r3, #12]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [r3, #16]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r3, #20]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r3, #24]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r3, #28]
+ ldr r3, [r3]
+ adds r3, r1, r3
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r5, r4, r5
+ adcs r1, r9, r7
+ ldr r7, [sp, #32] @ 4-byte Reload
+ adcs r2, r12, r2
+ adcs r12, lr, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs lr, r10, r7
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r10, r6, r7
+ ldr r6, [sp, #40] @ 4-byte Reload
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r6, r8, r6
+ adc r11, r11, r7
+ ldr r7, [sp, #72] @ 4-byte Reload
+ ands r8, r7, #1
+ ldr r7, [sp, #48] @ 4-byte Reload
+ moveq r5, r4
+ moveq r1, r9
+ moveq r3, r7
+ cmp r8, #0
+ str r3, [r0, #36]
+ str r5, [r0, #40]
+ str r1, [r0, #44]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r2, [r0, #48]
+ moveq r12, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r12, [r0, #52]
+ moveq lr, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ cmp r8, #0
+ str lr, [r0, #56]
+ moveq r10, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r10, [r0, #60]
+ moveq r6, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r6, [r0, #64]
+ moveq r11, r1
+ str r11, [r0, #68]
+ add sp, sp, #80
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end141:
+ .size mcl_fpDbl_sub9L, .Lfunc_end141-mcl_fpDbl_sub9L
+ .cantunwind
+ .fnend
+
+ .align 2
+ .type .LmulPv320x32,%function
+.LmulPv320x32: @ @mulPv320x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r3, [r1, #28]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #28]
+ ldr r3, [r1, #32]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #32]
+ ldr r1, [r1, #36]
+ umull r3, r7, r1, r2
+ adcs r1, r6, r3
+ str r1, [r0, #36]
+ adc r1, r7, #0
+ str r1, [r0, #40]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end142:
+ .size .LmulPv320x32, .Lfunc_end142-.LmulPv320x32
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre10L
+ .align 2
+ .type mcl_fp_mulUnitPre10L,%function
+mcl_fp_mulUnitPre10L: @ @mcl_fp_mulUnitPre10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ .pad #48
+ sub sp, sp, #48
+ mov r4, r0
+ mov r0, sp
+ bl .LmulPv320x32(PLT)
+ ldr r12, [sp, #40]
+ ldr lr, [sp, #36]
+ ldr r8, [sp, #32]
+ ldr r9, [sp, #28]
+ ldr r0, [sp, #24]
+ ldr r1, [sp, #20]
+ ldm sp, {r6, r7}
+ add r5, sp, #8
+ ldm r5, {r2, r3, r5}
+ stm r4, {r6, r7}
+ add r6, r4, #8
+ stm r6, {r2, r3, r5}
+ str r1, [r4, #20]
+ str r0, [r4, #24]
+ str r9, [r4, #28]
+ str r8, [r4, #32]
+ str lr, [r4, #36]
+ str r12, [r4, #40]
+ add sp, sp, #48
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end143:
+ .size mcl_fp_mulUnitPre10L, .Lfunc_end143-mcl_fp_mulUnitPre10L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre10L
+ .align 2
+ .type mcl_fpDbl_mulPre10L,%function
+mcl_fpDbl_mulPre10L: @ @mcl_fpDbl_mulPre10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #156
+ sub sp, sp, #156
+ mov r6, r2
+ mov r5, r1
+ mov r4, r0
+ bl mcl_fpDbl_mulPre5L(PLT)
+ add r0, r4, #40
+ add r1, r5, #20
+ add r2, r6, #20
+ bl mcl_fpDbl_mulPre5L(PLT)
+ add r11, r6, #24
+ ldr r7, [r6, #12]
+ ldr r8, [r6, #16]
+ ldr r1, [r6, #20]
+ ldm r11, {r0, r2, r10, r11}
+ ldm r6, {r6, r9, r12}
+ adds lr, r6, r1
+ adcs r3, r9, r0
+ mov r0, #0
+ str lr, [sp, #72] @ 4-byte Spill
+ adcs r2, r12, r2
+ str r3, [sp, #68] @ 4-byte Spill
+ adcs r12, r7, r10
+ str r2, [sp, #64] @ 4-byte Spill
+ adcs r10, r8, r11
+ str r12, [sp, #60] @ 4-byte Spill
+ adc r6, r0, #0
+ ldr r0, [r5, #32]
+ str r10, [sp, #56] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r5, #36]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldmib r5, {r8, r9, r11}
+ ldr r0, [r5, #16]
+ ldr r7, [r5, #20]
+ ldr r1, [r5, #28]
+ str lr, [sp, #76]
+ str r3, [sp, #80]
+ str r2, [sp, #84]
+ str r12, [sp, #88]
+ str r10, [sp, #92]
+ add r2, sp, #76
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r5, #24]
+ ldr r5, [r5]
+ adds r5, r5, r7
+ adcs r7, r8, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ str r5, [sp, #96]
+ adcs r9, r9, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r7, [sp, #100]
+ str r9, [sp, #104]
+ adcs r11, r11, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r11, [sp, #108]
+ adcs r8, r1, r0
+ mov r0, #0
+ add r1, sp, #96
+ adc r10, r0, #0
+ add r0, sp, #116
+ str r8, [sp, #112]
+ bl mcl_fpDbl_mulPre5L(PLT)
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ cmp r6, #0
+ ldr r2, [sp, #64] @ 4-byte Reload
+ ldr r3, [sp, #60] @ 4-byte Reload
+ moveq r5, r6
+ moveq r8, r6
+ moveq r11, r6
+ moveq r9, r6
+ moveq r7, r6
+ str r5, [sp, #52] @ 4-byte Spill
+ adds r0, r5, r0
+ ldr r5, [sp, #56] @ 4-byte Reload
+ adcs r1, r7, r1
+ adcs r2, r9, r2
+ adcs r3, r11, r3
+ adcs r12, r8, r5
+ mov r5, #0
+ adc lr, r5, #0
+ cmp r10, #0
+ ldr r5, [sp, #52] @ 4-byte Reload
+ moveq r1, r7
+ ldr r7, [sp, #136]
+ moveq r3, r11
+ moveq r2, r9
+ moveq r12, r8
+ moveq lr, r10
+ cmp r10, #0
+ moveq r0, r5
+ and r5, r6, r10
+ ldr r6, [sp, #152]
+ adds r8, r0, r7
+ ldr r7, [sp, #140]
+ adcs r10, r1, r7
+ ldr r7, [sp, #144]
+ adcs r11, r2, r7
+ ldr r7, [sp, #148]
+ adcs r0, r3, r7
+ adcs r12, r12, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ adc r9, lr, r5
+ ldm r4, {r5, r6, r7, lr}
+ ldr r1, [sp, #116]
+ ldr r2, [sp, #120]
+ ldr r0, [sp, #124]
+ ldr r3, [sp, #128]
+ subs r1, r1, r5
+ sbcs r2, r2, r6
+ ldr r6, [sp, #132]
+ sbcs r0, r0, r7
+ ldr r7, [r4, #16]
+ sbcs lr, r3, lr
+ ldr r3, [r4, #20]
+ sbcs r5, r6, r7
+ ldr r6, [r4, #32]
+ ldr r7, [r4, #52]
+ str r3, [sp, #72] @ 4-byte Spill
+ sbcs r3, r8, r3
+ ldr r8, [r4, #56]
+ str r3, [sp, #44] @ 4-byte Spill
+ ldr r3, [r4, #24]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r3, [sp, #68] @ 4-byte Spill
+ sbcs r3, r10, r3
+ ldr r10, [r4, #44]
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [r4, #28]
+ str r3, [sp, #64] @ 4-byte Spill
+ sbcs r3, r11, r3
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [sp, #60] @ 4-byte Reload
+ sbcs r3, r3, r6
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [r4, #36]
+ str r3, [sp, #60] @ 4-byte Spill
+ sbcs r3, r12, r3
+ ldr r12, [r4, #64]
+ str r3, [sp, #40] @ 4-byte Spill
+ sbc r3, r9, #0
+ ldr r9, [r4, #40]
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [r4, #76]
+ subs r1, r1, r9
+ sbcs r2, r2, r10
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r4, #48]
+ ldr r11, [sp, #32] @ 4-byte Reload
+ sbcs r0, r0, r2
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r4, #72]
+ str r0, [sp, #24] @ 4-byte Spill
+ sbcs r0, lr, r7
+ ldr lr, [r4, #68]
+ str r0, [sp, #16] @ 4-byte Spill
+ sbcs r0, r5, r8
+ ldr r5, [r4, #60]
+ ldr r6, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ sbcs r0, r0, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ sbcs r0, r0, r12
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ sbcs r0, r0, lr
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ sbcs r0, r0, r2
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ sbcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ sbc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adds r0, r0, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [r4, #20]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r11
+ adcs r0, r0, r6
+ str r1, [r4, #24]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ ldr r6, [sp, #16] @ 4-byte Reload
+ str r0, [r4, #28]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #12] @ 4-byte Reload
+ str r1, [r4, #32]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #8] @ 4-byte Reload
+ str r0, [r4, #36]
+ adcs r1, r9, r1
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r1, [r4, #40]
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r0, r10, r0
+ adcs r1, r1, r6
+ str r0, [r4, #44]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r6, [sp, #52] @ 4-byte Reload
+ str r1, [r4, #48]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r7, r0
+ adcs r1, r8, r1
+ adcs r5, r5, r6
+ adcs r7, r12, #0
+ add r12, r4, #52
+ adcs r6, lr, #0
+ stm r12, {r0, r1, r5, r7}
+ adcs r2, r2, #0
+ str r6, [r4, #68]
+ adc r3, r3, #0
+ str r2, [r4, #72]
+ str r3, [r4, #76]
+ add sp, sp, #156
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end144:
+ .size mcl_fpDbl_mulPre10L, .Lfunc_end144-mcl_fpDbl_mulPre10L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre10L
+ .align 2
+ .type mcl_fpDbl_sqrPre10L,%function
+mcl_fpDbl_sqrPre10L: @ @mcl_fpDbl_sqrPre10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #156
+ sub sp, sp, #156
+ mov r5, r1
+ mov r4, r0
+ mov r2, r5
+ bl mcl_fpDbl_mulPre5L(PLT)
+ add r1, r5, #20
+ add r0, r4, #40
+ mov r2, r1
+ bl mcl_fpDbl_mulPre5L(PLT)
+ ldr lr, [r5, #32]
+ ldr r12, [r5, #36]
+ ldmib r5, {r2, r3, r6, r8}
+ ldr r0, [r5, #20]
+ ldr r7, [r5, #24]
+ ldr r1, [r5, #28]
+ ldr r5, [r5]
+ adds r5, r5, r0
+ adcs r0, r2, r7
+ str r5, [sp, #96]
+ str r5, [sp, #76]
+ adcs r1, r3, r1
+ add r3, sp, #80
+ str r0, [sp, #100]
+ adcs r2, r6, lr
+ str r1, [sp, #104]
+ adcs r6, r8, r12
+ str r2, [sp, #108]
+ str r6, [sp, #112]
+ stm r3, {r0, r1, r2, r6}
+ lsr r3, r2, #31
+ orr r3, r3, r6, lsl #1
+ str r3, [sp, #72] @ 4-byte Spill
+ lsr r3, r1, #31
+ lsl r1, r1, #1
+ orr r1, r1, r0, lsr #31
+ orr r2, r3, r2, lsl #1
+ str r1, [sp, #64] @ 4-byte Spill
+ lsr r1, r5, #31
+ str r2, [sp, #68] @ 4-byte Spill
+ add r2, sp, #76
+ orr r11, r1, r0, lsl #1
+ mov r0, #0
+ add r1, sp, #96
+ adc r7, r0, #0
+ add r0, sp, #116
+ bl mcl_fpDbl_mulPre5L(PLT)
+ ldr r10, [sp, #136]
+ ldr r9, [sp, #140]
+ ldr r8, [sp, #144]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r2, [sp, #148]
+ ldr r1, [sp, #152]
+ adds r3, r10, r5, lsl #1
+ adcs r5, r9, r11
+ adcs r12, r8, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs lr, r2, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r11, r1, r0
+ adc r6, r7, r6, lsr #31
+ cmp r7, #0
+ moveq lr, r2
+ moveq r12, r8
+ moveq r11, r1
+ moveq r6, r7
+ moveq r5, r9
+ cmp r7, #0
+ add r7, sp, #116
+ moveq r3, r10
+ ldm r4, {r9, r10}
+ ldr r0, [r4, #8]
+ ldr r8, [r4, #12]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldm r7, {r1, r2, r7}
+ ldr r0, [sp, #128]
+ subs r1, r1, r9
+ ldr r9, [r4, #40]
+ sbcs r2, r2, r10
+ ldr r10, [r4, #44]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [sp, #72] @ 4-byte Reload
+ sbcs r7, r7, r2
+ ldr r2, [r4, #48]
+ str r7, [sp, #44] @ 4-byte Spill
+ sbcs r8, r0, r8
+ ldr r0, [r4, #16]
+ ldr r7, [sp, #132]
+ str r2, [sp, #16] @ 4-byte Spill
+ sbcs r0, r7, r0
+ ldr r7, [r4, #52]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r4, #20]
+ sbcs r3, r3, r0
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [r4, #24]
+ str r3, [sp, #72] @ 4-byte Spill
+ sbcs r3, r5, r3
+ ldr r5, [r4, #60]
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [r4, #28]
+ str r3, [sp, #68] @ 4-byte Spill
+ sbcs r3, r12, r3
+ ldr r12, [r4, #64]
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [r4, #32]
+ str r3, [sp, #64] @ 4-byte Spill
+ sbcs r3, lr, r3
+ ldr lr, [r4, #68]
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [r4, #36]
+ str r3, [sp, #60] @ 4-byte Spill
+ sbcs r3, r11, r3
+ str r3, [sp, #32] @ 4-byte Spill
+ sbc r3, r6, #0
+ subs r1, r1, r9
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r4, #76]
+ sbcs r1, r1, r10
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ ldr r11, [sp, #20] @ 4-byte Reload
+ sbcs r1, r1, r2
+ ldr r2, [r4, #72]
+ str r1, [sp, #44] @ 4-byte Spill
+ sbcs r1, r8, r7
+ ldr r8, [r4, #56]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ ldr r6, [sp, #44] @ 4-byte Reload
+ sbcs r1, r1, r8
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ sbcs r1, r1, r5
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ sbcs r1, r1, r12
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ sbcs r1, r1, lr
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r1, r1, r2
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ sbcs r1, r1, r3
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ sbc r1, r1, #0
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adds r0, r0, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r0, [r4, #20]
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r11
+ adcs r0, r0, r6
+ str r1, [r4, #24]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ ldr r6, [sp, #12] @ 4-byte Reload
+ str r0, [r4, #28]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #40] @ 4-byte Reload
+ str r1, [r4, #32]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #8] @ 4-byte Reload
+ str r0, [r4, #36]
+ adcs r1, r9, r1
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r1, [r4, #40]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r10, r0
+ adcs r1, r1, r6
+ str r0, [r4, #44]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r6, [sp, #52] @ 4-byte Reload
+ str r1, [r4, #48]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r7, r0
+ adcs r1, r8, r1
+ adcs r5, r5, r6
+ adcs r7, r12, #0
+ add r12, r4, #52
+ adcs r6, lr, #0
+ stm r12, {r0, r1, r5, r7}
+ adcs r2, r2, #0
+ str r6, [r4, #68]
+ adc r3, r3, #0
+ str r2, [r4, #72]
+ str r3, [r4, #76]
+ add sp, sp, #156
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end145:
+ .size mcl_fpDbl_sqrPre10L, .Lfunc_end145-mcl_fpDbl_sqrPre10L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont10L
+ .align 2
+ .type mcl_fp_mont10L,%function
+mcl_fp_mont10L: @ @mcl_fp_mont10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #28
+ sub sp, sp, #28
+ .pad #1024
+ sub sp, sp, #1024
+ mov r7, r2
+ ldr r5, [r3, #-4]
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, sp, #1000
+ str r3, [sp, #84] @ 4-byte Spill
+ str r1, [sp, #76] @ 4-byte Spill
+ mov r4, r3
+ mov r6, r1
+ ldr r2, [r7]
+ str r7, [sp, #72] @ 4-byte Spill
+ str r5, [sp, #80] @ 4-byte Spill
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #1004]
+ ldr r10, [sp, #1000]
+ mov r1, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1008]
+ mul r2, r10, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1012]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1032]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1024]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1020]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1016]
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #952
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #992]
+ ldr r2, [r7, #4]
+ ldr r9, [sp, #968]
+ ldr r8, [sp, #952]
+ ldr r11, [sp, #956]
+ ldr r5, [sp, #960]
+ ldr r4, [sp, #964]
+ mov r1, r6
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #988]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #984]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #980]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #976]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #972]
+ str r0, [sp, #4] @ 4-byte Spill
+ add r0, sp, #904
+ bl .LmulPv320x32(PLT)
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adds r0, r8, r10
+ ldr r2, [sp, #4] @ 4-byte Reload
+ add lr, sp, #908
+ ldr r10, [sp, #944]
+ mov r0, #0
+ adcs r1, r11, r1
+ add r11, sp, #932
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r5, r1
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r4, r1
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r9, r1
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ ldm r11, {r5, r6, r11}
+ ldr r4, [sp, #904]
+ adcs r8, r2, r1
+ adc r9, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adds r4, r7, r4
+ ldr r7, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #856
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #896]
+ add r11, sp, #856
+ ldr r6, [sp, #880]
+ ldr r7, [sp, #876]
+ ldr r5, [sp, #872]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #888]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #884]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, sp, #808
+ bl .LmulPv320x32(PLT)
+ adds r0, r4, r8
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #808
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #848]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #832
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r4, r5, r6, r11}
+ adc r9, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #760
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #800]
+ add r11, sp, #760
+ ldr r6, [sp, #784]
+ ldr r4, [sp, #780]
+ ldr r5, [sp, #776]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #712
+ bl .LmulPv320x32(PLT)
+ adds r0, r7, r8
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #716
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #752]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #740
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r5, r6, r11}
+ ldr r4, [sp, #712]
+ adc r9, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r4, r7, r4
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #664
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #704]
+ add r11, sp, #664
+ ldr r6, [sp, #688]
+ ldr r7, [sp, #684]
+ ldr r5, [sp, #680]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, sp, #616
+ bl .LmulPv320x32(PLT)
+ adds r0, r4, r8
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #616
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #656]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #640
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r4, r5, r6, r11}
+ adc r9, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #568
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #608]
+ add r11, sp, #568
+ ldr r6, [sp, #592]
+ ldr r4, [sp, #588]
+ ldr r5, [sp, #584]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #604]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #600]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #596]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #520
+ bl .LmulPv320x32(PLT)
+ adds r0, r7, r8
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #524
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #560]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #548
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r5, r6, r11}
+ ldr r4, [sp, #520]
+ adc r9, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r4, r7, r4
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #472
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #512]
+ add r11, sp, #472
+ ldr r6, [sp, #496]
+ ldr r7, [sp, #492]
+ ldr r5, [sp, #488]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #504]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #500]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #424
+ bl .LmulPv320x32(PLT)
+ adds r0, r4, r8
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #424
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #464]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #448
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r4, r5, r6, r11}
+ adc r9, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #376
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #416]
+ add r11, sp, #376
+ ldr r6, [sp, #400]
+ ldr r4, [sp, #396]
+ ldr r5, [sp, #392]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #404]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #328
+ bl .LmulPv320x32(PLT)
+ adds r0, r7, r8
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #332
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #368]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #356
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r5, r6, r11}
+ ldr r4, [sp, #328]
+ adc r9, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r4, r7, r4
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #280
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #320]
+ add r11, sp, #280
+ ldr r6, [sp, #304]
+ ldr r7, [sp, #300]
+ ldr r5, [sp, #296]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #316]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #312]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #308]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #232
+ bl .LmulPv320x32(PLT)
+ adds r0, r4, r8
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #232
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #272]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #256
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r4, r5, r6, r11}
+ adc r9, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #184
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #224]
+ add r11, sp, #184
+ ldr r6, [sp, #208]
+ ldr r4, [sp, #204]
+ ldr r5, [sp, #200]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #220]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #216]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #212]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #136
+ bl .LmulPv320x32(PLT)
+ adds r0, r7, r8
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ add lr, sp, #136
+ add r7, sp, #152
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ add r9, sp, #164
+ adcs r10, r1, r10
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r11, r1, r11
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #44] @ 4-byte Spill
+ ldm lr, {r2, r6, r12, lr}
+ ldr r8, [sp, #176]
+ adds r4, r0, r2
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldm r9, {r3, r5, r9}
+ adcs r6, r10, r6
+ mul r2, r4, r0
+ ldm r7, {r0, r1, r7}
+ str r6, [sp, #40] @ 4-byte Spill
+ adcs r6, r11, r12
+ ldr r11, [sp, #84] @ 4-byte Reload
+ str r6, [sp, #36] @ 4-byte Spill
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adcs r10, r6, lr
+ ldr r6, [sp, #72] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ mov r1, r11
+ adcs r0, r0, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r7, r0, r8
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #88
+ bl .LmulPv320x32(PLT)
+ add r3, sp, #88
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r4, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r4, r0, r2
+ ldr r2, [sp, #104]
+ adcs r0, r10, r3
+ str r4, [sp, #40] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #108]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r5, r6, r2
+ ldr r2, [sp, #112]
+ str r5, [sp, #48] @ 4-byte Spill
+ adcs r12, r0, r2
+ ldr r2, [sp, #116]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r12, [sp, #52] @ 4-byte Spill
+ adcs lr, r0, r2
+ ldr r2, [sp, #120]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str lr, [sp, #60] @ 4-byte Spill
+ adcs r0, r0, r2
+ ldr r2, [sp, #124]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #128]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r9, r7, r2
+ adc r0, r0, #0
+ str r9, [sp, #64] @ 4-byte Spill
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, r11
+ ldr r2, [r0, #16]
+ ldr r10, [r0]
+ ldr r3, [r0, #4]
+ ldr r1, [r0, #8]
+ ldr r6, [r0, #12]
+ ldr r7, [r0, #24]
+ ldr r11, [r0, #32]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r0, #20]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r0, #28]
+ ldr r0, [r0, #36]
+ str r2, [sp, #36] @ 4-byte Spill
+ mov r2, r8
+ ldr r8, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ subs r10, r2, r10
+ sbcs r3, r4, r3
+ ldr r4, [sp, #80] @ 4-byte Reload
+ sbcs r1, r8, r1
+ sbcs r6, r4, r6
+ sbcs r4, r5, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ sbcs r5, r12, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ sbcs r12, lr, r7
+ ldr r7, [sp, #36] @ 4-byte Reload
+ sbcs lr, r0, r7
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r7, [sp, #44] @ 4-byte Reload
+ sbcs r11, r0, r11
+ ldr r0, [sp, #84] @ 4-byte Reload
+ sbcs r0, r9, r0
+ ldr r9, [sp, #68] @ 4-byte Reload
+ sbc r7, r7, #0
+ ands r7, r7, #1
+ movne r10, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ movne r1, r8
+ str r10, [r9]
+ movne r3, r2
+ cmp r7, #0
+ str r3, [r9, #4]
+ str r1, [r9, #8]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ movne r6, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r6, [r9, #12]
+ movne r4, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r4, [r9, #16]
+ movne r5, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ cmp r7, #0
+ str r5, [r9, #20]
+ movne r12, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r12, [r9, #24]
+ movne lr, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str lr, [r9, #28]
+ movne r11, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ cmp r7, #0
+ str r11, [r9, #32]
+ movne r0, r1
+ str r0, [r9, #36]
+ add sp, sp, #28
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end146:
+ .size mcl_fp_mont10L, .Lfunc_end146-mcl_fp_mont10L
+ .cantunwind
+ .fnend
+
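+@ Note (inferred from the structure below): mcl_fp_montNF10L is the 10-limb
+@ (320-bit) Montgomery multiplication variant that folds the reduction into
+@ each .LmulPv320x32 pass and selects the final result with sign-based
+@ conditional moves (movlt) rather than the carry/mask test used by
+@ mcl_fp_mont10L above.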
+ .globl mcl_fp_montNF10L
+ .align 2
+ .type mcl_fp_montNF10L,%function
+mcl_fp_montNF10L: @ @mcl_fp_montNF10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #28
+ sub sp, sp, #28
+ .pad #1024
+ sub sp, sp, #1024
+ mov r7, r2
+ ldr r5, [r3, #-4]
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, sp, #1000
+ str r3, [sp, #84] @ 4-byte Spill
+ str r1, [sp, #76] @ 4-byte Spill
+ mov r4, r3
+ mov r6, r1
+ ldr r2, [r7]
+ str r7, [sp, #72] @ 4-byte Spill
+ str r5, [sp, #80] @ 4-byte Spill
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #1004]
+ ldr r10, [sp, #1000]
+ mov r1, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1008]
+ mul r2, r10, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1012]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1032]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1024]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1020]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1016]
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #952
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #992]
+ ldr r2, [r7, #4]
+ ldr r9, [sp, #968]
+ ldr r8, [sp, #952]
+ ldr r11, [sp, #956]
+ ldr r5, [sp, #960]
+ ldr r4, [sp, #964]
+ mov r1, r6
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #988]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #984]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #980]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #976]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #972]
+ str r0, [sp, #4] @ 4-byte Spill
+ add r0, sp, #904
+ bl .LmulPv320x32(PLT)
+ adds r0, r8, r10
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #908
+ ldr r10, [sp, #940]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r11, r0
+ ldr r11, [sp, #936]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #932]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #904]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #944]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r1, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r8, r1, r0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r6, [sp, #52] @ 4-byte Reload
+ adds r4, r6, r4
+ ldr r6, [sp, #48] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r7, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #856
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #896]
+ add r11, sp, #856
+ ldr r6, [sp, #880]
+ ldr r7, [sp, #876]
+ ldr r5, [sp, #872]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #888]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #884]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, sp, #808
+ bl .LmulPv320x32(PLT)
+ adds r0, r4, r8
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #808
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #848]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #844]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #832
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldm r11, {r4, r5, r11}
+ adc r8, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adds r6, r6, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #760
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #800]
+ add r11, sp, #760
+ ldr r5, [sp, #784]
+ ldr r7, [sp, #780]
+ ldr r4, [sp, #776]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #712
+ bl .LmulPv320x32(PLT)
+ adds r0, r6, r8
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #716
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #752]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #748]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #744]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #712]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #740]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r8, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adds r4, r6, r4
+ ldr r6, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r7, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #664
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #704]
+ add r11, sp, #664
+ ldr r6, [sp, #688]
+ ldr r7, [sp, #684]
+ ldr r5, [sp, #680]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, sp, #616
+ bl .LmulPv320x32(PLT)
+ adds r0, r4, r8
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #616
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #656]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #652]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #640
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldm r11, {r4, r5, r11}
+ adc r8, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adds r6, r6, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #568
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #608]
+ add r11, sp, #568
+ ldr r5, [sp, #592]
+ ldr r7, [sp, #588]
+ ldr r4, [sp, #584]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #604]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #600]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #596]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #520
+ bl .LmulPv320x32(PLT)
+ adds r0, r6, r8
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #524
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #560]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #556]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #552]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #520]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #548]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r8, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adds r4, r6, r4
+ ldr r6, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r7, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #472
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #512]
+ add r11, sp, #472
+ ldr r6, [sp, #496]
+ ldr r7, [sp, #492]
+ ldr r5, [sp, #488]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #504]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #500]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #424
+ bl .LmulPv320x32(PLT)
+ adds r0, r4, r8
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #424
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #464]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #460]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #448
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldm r11, {r4, r5, r11}
+ adc r8, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adds r6, r6, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #376
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #416]
+ add r11, sp, #376
+ ldr r5, [sp, #400]
+ ldr r7, [sp, #396]
+ ldr r4, [sp, #392]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #404]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #328
+ bl .LmulPv320x32(PLT)
+ adds r0, r6, r8
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #332
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #368]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #364]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #360]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #328]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #356]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r8, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adds r4, r6, r4
+ ldr r6, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r7, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #280
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #320]
+ add r11, sp, #280
+ ldr r6, [sp, #304]
+ ldr r7, [sp, #300]
+ ldr r5, [sp, #296]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #316]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #312]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #308]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #232
+ bl .LmulPv320x32(PLT)
+ adds r0, r4, r8
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #232
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #272]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #268]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #256
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r7, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldm r11, {r4, r5, r11}
+ adc r8, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adds r6, r6, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ adcs r0, r7, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r8, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #184
+ bl .LmulPv320x32(PLT)
+ ldr r0, [sp, #224]
+ add r11, sp, #184
+ ldr r5, [sp, #208]
+ ldr r7, [sp, #204]
+ ldr r4, [sp, #200]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #220]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #216]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #212]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #136
+ bl .LmulPv320x32(PLT)
+ adds r0, r6, r8
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ ldr lr, [sp, #140]
+ ldr r6, [sp, #144]
+ add r8, sp, #152
+ ldr r12, [sp, #148]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ adcs r9, r1, r10
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r10, r1, r11
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adc r1, r1, r2
+ ldr r2, [sp, #136]
+ str r1, [sp, #48] @ 4-byte Spill
+ adds r4, r0, r2
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r9, r9, lr
+ adcs r11, r10, r6
+ mul r1, r4, r0
+ str r1, [sp, #44] @ 4-byte Spill
+ ldm r8, {r0, r1, r2, r3, r5, r7, r8}
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adcs r10, r6, r12
+ ldr r6, [sp, #72] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, sp, #88
+ adc r8, r8, #0
+ bl .LmulPv320x32(PLT)
+ add r3, sp, #88
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r4, r0
+ adcs r7, r9, r1
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r9, r11, r2
+ ldr r2, [sp, #104]
+ str r7, [sp, #48] @ 4-byte Spill
+ adcs lr, r10, r3
+ str lr, [sp, #52] @ 4-byte Spill
+ adcs r6, r0, r2
+ ldr r2, [sp, #108]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r6, [sp, #56] @ 4-byte Spill
+ adcs r0, r0, r2
+ ldr r2, [sp, #112]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r4, r0, r2
+ ldr r2, [sp, #116]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r4, [sp, #60] @ 4-byte Spill
+ adcs r12, r0, r2
+ ldr r2, [sp, #120]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r12, [sp, #64] @ 4-byte Spill
+ adcs r0, r0, r2
+ ldr r2, [sp, #124]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r11, r0, r2
+ ldr r2, [sp, #128]
+ mov r0, r5
+ str r11, [sp, #72] @ 4-byte Spill
+ adc r1, r8, r2
+ str r1, [sp, #44] @ 4-byte Spill
+ ldmib r0, {r2, r8}
+ ldr r5, [r0, #16]
+ ldr r10, [r0]
+ ldr r3, [r0, #12]
+ str r5, [sp, #28] @ 4-byte Spill
+ ldr r5, [r0, #20]
+ subs r10, r7, r10
+ str r5, [sp, #32] @ 4-byte Spill
+ ldr r5, [r0, #24]
+ str r5, [sp, #36] @ 4-byte Spill
+ ldr r5, [r0, #28]
+ str r5, [sp, #40] @ 4-byte Spill
+ mov r5, r0
+ sbcs r0, r9, r2
+ sbcs r2, lr, r8
+ ldr r8, [r5, #32]
+ sbcs r7, r6, r3
+ ldr r3, [r5, #36]
+ ldr r6, [sp, #80] @ 4-byte Reload
+ ldr r5, [sp, #76] @ 4-byte Reload
+ str r3, [sp, #84] @ 4-byte Spill
+ ldr r3, [sp, #28] @ 4-byte Reload
+ sbcs r6, r6, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ sbcs lr, r4, r3
+ ldr r3, [sp, #36] @ 4-byte Reload
+ sbcs r4, r12, r3
+ ldr r3, [sp, #40] @ 4-byte Reload
+ sbcs r12, r5, r3
+ ldr r3, [sp, #84] @ 4-byte Reload
+ ldr r5, [sp, #48] @ 4-byte Reload
+ sbcs r11, r11, r8
+ ldr r8, [sp, #68] @ 4-byte Reload
+ sbc r3, r1, r3
+ asr r1, r3, #31
+ cmp r1, #0
+ movlt r10, r5
+ movlt r0, r9
+ str r10, [r8]
+ str r0, [r8, #4]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ movlt r2, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ cmp r1, #0
+ str r2, [r8, #8]
+ movlt r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r7, [r8, #12]
+ movlt r6, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ str r6, [r8, #16]
+ movlt lr, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ cmp r1, #0
+ str lr, [r8, #20]
+ movlt r4, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r4, [r8, #24]
+ movlt r12, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r12, [r8, #28]
+ movlt r11, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ cmp r1, #0
+ str r11, [r8, #32]
+ movlt r3, r0
+ str r3, [r8, #36]
+ add sp, sp, #28
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end147:
+ .size mcl_fp_montNF10L, .Lfunc_end147-mcl_fp_montNF10L
+ .cantunwind
+ .fnend
+
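+@ Note (inferred from the code below): mcl_fp_montRed10L performs Montgomery
+@ reduction of a double-width (20-limb) input modulo the 10-limb modulus
+@ passed in r2, multiplying by the constant loaded from p[-4] (-p^-1 mod 2^32)
+@ on each of the ten .LmulPv320x32 passes and conditionally subtracting the
+@ modulus at the end.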
+ .globl mcl_fp_montRed10L
+ .align 2
+ .type mcl_fp_montRed10L,%function
+mcl_fp_montRed10L: @ @mcl_fp_montRed10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #612
+ sub sp, sp, #612
+ mov r5, r2
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r9, [r1]
+ ldr r11, [r1, #16]
+ ldr r0, [r5]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [r5, #4]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [r5, #8]
+ str r2, [sp, #52] @ 4-byte Spill
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [r5, #12]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r5, #16]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r5, #20]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r5, #24]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [r5, #-4]
+ str r0, [sp, #124] @ 4-byte Spill
+ mul r2, r9, r0
+ ldr r0, [r5, #28]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r5, #32]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r5, #36]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r1, #72]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r1, #76]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #20]
+ mov r1, r5
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #560
+ bl .LmulPv320x32(PLT)
+ add lr, sp, #584
+ ldr r10, [sp, #600]
+ ldr r8, [sp, #596]
+ add r7, sp, #564
+ ldm lr, {r6, r12, lr}
+ ldr r4, [sp, #560]
+ ldm r7, {r0, r1, r2, r3, r7}
+ adds r4, r9, r4
+ ldr r4, [sp, #60] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r9, r0, r1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ adcs r0, r11, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #512
+ bl .LmulPv320x32(PLT)
+ add r6, sp, #512
+ ldr r12, [sp, #552]
+ ldr lr, [sp, #548]
+ ldr r2, [sp, #544]
+ ldr r10, [sp, #540]
+ ldr r11, [sp, #536]
+ ldr r7, [sp, #532]
+ ldr r8, [sp, #528]
+ ldm r6, {r1, r3, r6}
+ ldr r0, [sp, #524]
+ adds r1, r4, r1
+ ldr r4, [sp, #124] @ 4-byte Reload
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r9, r9, r3
+ adcs r1, r1, r6
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r9, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #464
+ bl .LmulPv320x32(PLT)
+ ldr r1, [sp, #464]
+ ldr r0, [sp, #504]
+ add r12, sp, #468
+ ldr r10, [sp, #500]
+ ldr r8, [sp, #496]
+ ldr lr, [sp, #492]
+ ldr r6, [sp, #488]
+ ldr r7, [sp, #484]
+ adds r1, r9, r1
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r12, {r0, r2, r3, r12}
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #416
+ bl .LmulPv320x32(PLT)
+ add r7, sp, #416
+ ldr r12, [sp, #456]
+ ldr lr, [sp, #452]
+ ldr r2, [sp, #448]
+ ldr r3, [sp, #444]
+ add r10, sp, #428
+ ldm r7, {r1, r6, r7}
+ ldm r10, {r0, r8, r9, r10}
+ adds r1, r11, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r11, r1, r6
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #368
+ bl .LmulPv320x32(PLT)
+ add r10, sp, #400
+ add r12, sp, #372
+ ldm r10, {r8, r9, r10}
+ ldr r1, [sp, #368]
+ ldr lr, [sp, #396]
+ ldr r6, [sp, #392]
+ ldr r7, [sp, #388]
+ ldm r12, {r0, r2, r3, r12}
+ adds r1, r11, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #320
+ bl .LmulPv320x32(PLT)
+ add r7, sp, #320
+ ldr r12, [sp, #360]
+ ldr lr, [sp, #356]
+ ldr r2, [sp, #352]
+ ldr r3, [sp, #348]
+ add r10, sp, #332
+ ldm r7, {r1, r6, r7}
+ ldm r10, {r0, r8, r9, r10}
+ adds r1, r11, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r11, r1, r6
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #272
+ bl .LmulPv320x32(PLT)
+ add r10, sp, #304
+ add r12, sp, #276
+ ldm r10, {r8, r9, r10}
+ ldr r1, [sp, #272]
+ ldr lr, [sp, #300]
+ ldr r6, [sp, #296]
+ ldr r7, [sp, #292]
+ ldm r12, {r0, r2, r3, r12}
+ adds r1, r11, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #224
+ bl .LmulPv320x32(PLT)
+ add r10, sp, #240
+ add r6, sp, #224
+ ldr r12, [sp, #264]
+ ldr lr, [sp, #260]
+ ldr r8, [sp, #256]
+ ldr r9, [sp, #252]
+ ldm r10, {r0, r7, r10}
+ ldm r6, {r1, r2, r3, r6}
+ adds r1, r11, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r4, r1, r2
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r11, r1, r3
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ mul r2, r4, r7
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r9, r0, r9
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, sp, #176
+ bl .LmulPv320x32(PLT)
+ add r12, sp, #176
+ ldm r12, {r0, r1, r3, r12}
+ ldr lr, [sp, #216]
+ adds r0, r4, r0
+ ldr r4, [sp, #76] @ 4-byte Reload
+ adcs r10, r11, r1
+ ldr r1, [sp, #192]
+ adcs r0, r6, r3
+ mul r2, r10, r7
+ ldr r7, [sp, #200]
+ ldr r6, [sp, #204]
+ ldr r3, [sp, #208]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r12
+ ldr r12, [sp, #212]
+ str r0, [sp, #44] @ 4-byte Spill
+ adcs r8, r4, r1
+ ldr r0, [sp, #196]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ mov r1, r5
+ adcs r9, r9, r7
+ adcs r6, r0, r6
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, sp, #128
+ bl .LmulPv320x32(PLT)
+ add r3, sp, #128
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r10, r0
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r1, r0, r2
+ ldr r0, [sp, #144]
+ adcs r2, r8, r3
+ ldr r3, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ str r2, [sp, #44] @ 4-byte Spill
+ adcs r7, r11, r0
+ ldr r0, [sp, #148]
+ str r7, [sp, #48] @ 4-byte Spill
+ adcs r12, r9, r0
+ ldr r0, [sp, #152]
+ str r12, [sp, #52] @ 4-byte Spill
+ adcs r4, r6, r0
+ ldr r0, [sp, #156]
+ str r4, [sp, #56] @ 4-byte Spill
+ adcs r5, r3, r0
+ ldr r0, [sp, #160]
+ ldr r3, [sp, #68] @ 4-byte Reload
+ str r5, [sp, #60] @ 4-byte Spill
+ adcs r6, r3, r0
+ ldr r0, [sp, #164]
+ ldr r3, [sp, #64] @ 4-byte Reload
+ str r6, [sp, #68] @ 4-byte Spill
+ adcs r8, r3, r0
+ ldr r0, [sp, #168]
+ ldr r3, [sp, #76] @ 4-byte Reload
+ str r8, [sp, #124] @ 4-byte Spill
+ adcs lr, r3, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adc r11, r0, #0
+ ldr r0, [sp, #116] @ 4-byte Reload
+ subs r3, r10, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #108] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #92] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #96] @ 4-byte Reload
+ sbcs r12, r12, r7
+ ldr r7, [sp, #100] @ 4-byte Reload
+ sbcs r7, r4, r7
+ ldr r4, [sp, #104] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #80] @ 4-byte Reload
+ sbcs r5, r6, r5
+ ldr r6, [sp, #84] @ 4-byte Reload
+ sbcs r9, r8, r6
+ ldr r6, [sp, #88] @ 4-byte Reload
+ sbcs r8, lr, r6
+ sbc r6, r11, #0
+ ands r11, r6, #1
+ ldr r6, [sp, #120] @ 4-byte Reload
+ movne r3, r10
+ str r3, [r6]
+ ldr r3, [sp, #36] @ 4-byte Reload
+ movne r0, r3
+ str r0, [r6, #4]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ cmp r11, #0
+ str r1, [r6, #8]
+ movne r2, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r2, [r6, #12]
+ movne r12, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r12, [r6, #16]
+ movne r7, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ cmp r11, #0
+ str r7, [r6, #20]
+ movne r4, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r4, [r6, #24]
+ movne r5, r0
+ ldr r0, [sp, #124] @ 4-byte Reload
+ str r5, [r6, #28]
+ movne r9, r0
+ cmp r11, #0
+ movne r8, lr
+ str r9, [r6, #32]
+ str r8, [r6, #36]
+ add sp, sp, #612
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end148:
+ .size mcl_fp_montRed10L, .Lfunc_end148-mcl_fp_montRed10L
+ .cantunwind
+ .fnend
+
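+@ Note (inferred from the code below): mcl_fp_addPre10L adds two 10-limb
+@ values without any modular reduction and returns the final carry (0 or 1)
+@ in r0.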
+ .globl mcl_fp_addPre10L
+ .align 2
+ .type mcl_fp_addPre10L,%function
+mcl_fp_addPre10L: @ @mcl_fp_addPre10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ ldm r1, {r3, r8, lr}
+ ldr r9, [r1, #12]
+ ldmib r2, {r5, r6, r7, r10}
+ ldr r4, [r2, #20]
+ ldr r11, [r2]
+ str r4, [sp] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ adds r12, r11, r3
+ ldr r11, [r2, #32]
+ adcs r5, r5, r8
+ ldr r8, [r1, #36]
+ adcs r6, r6, lr
+ add lr, r1, #16
+ adcs r7, r7, r9
+ str r4, [sp, #4] @ 4-byte Spill
+ ldr r4, [r2, #28]
+ ldr r2, [r2, #36]
+ str r4, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #8] @ 4-byte Spill
+ ldm lr, {r1, r2, r3, r4, lr}
+ str r12, [r0]
+ stmib r0, {r5, r6}
+ str r7, [r0, #12]
+ ldr r7, [sp] @ 4-byte Reload
+ adcs r1, r10, r1
+ str r1, [r0, #16]
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r2, r7, r2
+ str r2, [r0, #20]
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r1, r1, r3
+ str r1, [r0, #24]
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [r0, #28]
+ adcs r1, r11, lr
+ adcs r2, r2, r8
+ str r1, [r0, #32]
+ str r2, [r0, #36]
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end149:
+ .size mcl_fp_addPre10L, .Lfunc_end149-mcl_fp_addPre10L
+ .cantunwind
+ .fnend
+
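+@ Note (inferred from the code below): mcl_fp_subPre10L subtracts two 10-limb
+@ values without reduction and returns the final borrow (0 or 1) in r0.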
+ .globl mcl_fp_subPre10L
+ .align 2
+ .type mcl_fp_subPre10L,%function
+mcl_fp_subPre10L: @ @mcl_fp_subPre10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #24
+ sub sp, sp, #24
+ ldr r3, [r2, #4]
+ ldr r7, [r2]
+ ldr r11, [r1]
+ ldr r6, [r1, #4]
+ ldr r9, [r2, #8]
+ ldr r5, [r1, #8]
+ ldr lr, [r2, #12]
+ ldr r4, [r1, #12]
+ ldr r12, [r1, #16]
+ ldr r8, [r1, #20]
+ ldr r10, [r1, #24]
+ str r3, [sp] @ 4-byte Spill
+ ldr r3, [r2, #16]
+ subs r7, r11, r7
+ ldr r11, [r2, #32]
+ str r7, [r0]
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [r2, #20]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [r2, #24]
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ ldr r2, [r2, #36]
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [r1, #28]
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [sp] @ 4-byte Reload
+ sbcs r6, r6, r3
+ sbcs r5, r5, r9
+ str r6, [r0, #4]
+ str r5, [r0, #8]
+ ldr r5, [sp, #8] @ 4-byte Reload
+ sbcs r4, r4, lr
+ ldr lr, [r1, #32]
+ ldr r1, [r1, #36]
+ str r4, [r0, #12]
+ ldr r4, [sp, #12] @ 4-byte Reload
+ sbcs r3, r12, r5
+ str r3, [r0, #16]
+ ldr r3, [sp, #16] @ 4-byte Reload
+ sbcs r7, r8, r4
+ str r7, [r0, #20]
+ ldr r7, [sp, #4] @ 4-byte Reload
+ sbcs r3, r10, r3
+ str r3, [r0, #24]
+ ldr r3, [sp, #20] @ 4-byte Reload
+ sbcs r3, r7, r3
+ str r3, [r0, #28]
+ sbcs r3, lr, r11
+ sbcs r1, r1, r2
+ str r3, [r0, #32]
+ str r1, [r0, #36]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #24
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end150:
+ .size mcl_fp_subPre10L, .Lfunc_end150-mcl_fp_subPre10L
+ .cantunwind
+ .fnend
+
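+@ Note (inferred from the code below): mcl_fp_shr1_10L shifts a 10-limb value
+@ right by one bit, carrying bits across limb boundaries with lsrs/rrx and
+@ orr ..., lsl #31 pairs.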
+ .globl mcl_fp_shr1_10L
+ .align 2
+ .type mcl_fp_shr1_10L,%function
+mcl_fp_shr1_10L: @ @mcl_fp_shr1_10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr lr, [r1, #32]
+ ldr r12, [r1, #36]
+ ldr r8, [r1, #28]
+ ldm r1, {r1, r2, r3, r4, r5, r6, r9}
+ lsrs r7, r2, #1
+ rrx r1, r1
+ str r1, [r0]
+ lsr r1, r2, #1
+ lsr r2, r12, #1
+ orr r1, r1, r3, lsl #31
+ str r1, [r0, #4]
+ lsrs r1, r4, #1
+ rrx r1, r3
+ str r1, [r0, #8]
+ lsr r1, r4, #1
+ orr r1, r1, r5, lsl #31
+ str r1, [r0, #12]
+ lsrs r1, r6, #1
+ rrx r1, r5
+ str r1, [r0, #16]
+ lsr r1, r6, #1
+ orr r1, r1, r9, lsl #31
+ str r1, [r0, #20]
+ lsrs r1, r8, #1
+ rrx r1, r9
+ str r1, [r0, #24]
+ lsr r1, r8, #1
+ orr r1, r1, lr, lsl #31
+ str r1, [r0, #28]
+ lsrs r1, r12, #1
+ rrx r1, lr
+ str r1, [r0, #32]
+ str r2, [r0, #36]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end151:
+ .size mcl_fp_shr1_10L, .Lfunc_end151-mcl_fp_shr1_10L
+ .cantunwind
+ .fnend
+
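+@ Note (inferred from the code below): mcl_fp_add10L adds two 10-limb
+@ operands, stores the raw sum, then subtracts the modulus in r3 and keeps
+@ the subtracted result only on the %nocarry path, i.e. when the sum was not
+@ below the modulus.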
+ .globl mcl_fp_add10L
+ .align 2
+ .type mcl_fp_add10L,%function
+mcl_fp_add10L: @ @mcl_fp_add10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #20
+ sub sp, sp, #20
+ ldm r1, {r12, lr}
+ ldr r5, [r2]
+ ldr r10, [r1, #8]
+ ldr r8, [r1, #12]
+ ldmib r2, {r4, r6, r7}
+ adds r9, r5, r12
+ ldr r5, [r1, #24]
+ adcs lr, r4, lr
+ ldr r4, [r1, #20]
+ adcs r6, r6, r10
+ ldr r10, [r1, #36]
+ str lr, [sp] @ 4-byte Spill
+ str r6, [sp, #12] @ 4-byte Spill
+ adcs r12, r7, r8
+ ldr r6, [r1, #16]
+ ldr r7, [r2, #16]
+ adcs r6, r7, r6
+ ldr r7, [r2, #20]
+ str r6, [sp, #4] @ 4-byte Spill
+ adcs r8, r7, r4
+ ldr r4, [r2, #24]
+ adcs r6, r4, r5
+ ldr r4, [r1, #28]
+ ldr r5, [r2, #28]
+ str r6, [sp, #8] @ 4-byte Spill
+ adcs r7, r5, r4
+ ldr r5, [r1, #32]
+ ldr r1, [r2, #32]
+ ldr r2, [r2, #36]
+ stm r0, {r9, lr}
+ mov lr, r12
+ ldr r4, [sp, #4] @ 4-byte Reload
+ adcs r11, r1, r5
+ add r1, r0, #24
+ adcs r10, r2, r10
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r2, [r0, #8]
+ str lr, [r0, #12]
+ str r4, [r0, #16]
+ str r8, [r0, #20]
+ stm r1, {r6, r7, r11}
+ mov r1, #0
+ str r10, [r0, #36]
+ adc r1, r1, #0
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r3, {r1, r6, r12}
+ ldr r5, [r3, #12]
+ subs r9, r9, r1
+ ldr r1, [sp] @ 4-byte Reload
+ sbcs r6, r1, r6
+ sbcs r1, r2, r12
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ sbcs r12, lr, r5
+ sbcs lr, r4, r1
+ ldr r1, [r3, #20]
+ ldr r4, [sp, #16] @ 4-byte Reload
+ sbcs r8, r8, r1
+ ldr r1, [r3, #24]
+ sbcs r5, r2, r1
+ ldr r2, [r3, #28]
+ sbcs r1, r7, r2
+ ldr r2, [r3, #32]
+ ldr r7, [r3, #36]
+ sbcs r3, r11, r2
+ sbcs r2, r10, r7
+ sbc r4, r4, #0
+ tst r4, #1
+ bne .LBB152_2
+@ BB#1: @ %nocarry
+ ldr r4, [sp, #12] @ 4-byte Reload
+ str r9, [r0]
+ str r6, [r0, #4]
+ str r4, [r0, #8]
+ str r12, [r0, #12]
+ str lr, [r0, #16]
+ str r8, [r0, #20]
+ str r5, [r0, #24]
+ str r1, [r0, #28]
+ str r3, [r0, #32]
+ str r2, [r0, #36]
+.LBB152_2: @ %carry
+ add sp, sp, #20
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end152:
+ .size mcl_fp_add10L, .Lfunc_end152-mcl_fp_add10L
+ .cantunwind
+ .fnend
+
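+@ Note (inferred from the code below): mcl_fp_addNF10L adds two 10-limb
+@ operands and then chooses between the sum and the sum minus the modulus
+@ with sign-based conditional moves (movlt on the top borrow word), avoiding
+@ the explicit carry test used by mcl_fp_add10L.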
+ .globl mcl_fp_addNF10L
+ .align 2
+ .type mcl_fp_addNF10L,%function
+mcl_fp_addNF10L: @ @mcl_fp_addNF10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #56
+ sub sp, sp, #56
+ ldr r9, [r1]
+ ldmib r1, {r8, lr}
+ ldr r5, [r2]
+ ldr r12, [r1, #12]
+ ldmib r2, {r4, r6, r7}
+ ldr r10, [r1, #24]
+ adds r9, r5, r9
+ ldr r5, [r1, #16]
+ adcs r11, r4, r8
+ ldr r8, [r1, #20]
+ str r9, [sp, #16] @ 4-byte Spill
+ adcs r6, r6, lr
+ str r11, [sp, #20] @ 4-byte Spill
+ str r6, [sp, #32] @ 4-byte Spill
+ adcs r6, r7, r12
+ ldr r7, [r2, #16]
+ str r6, [sp, #24] @ 4-byte Spill
+ adcs r4, r7, r5
+ ldr r7, [r2, #20]
+ ldr r5, [r2, #28]
+ str r4, [sp, #28] @ 4-byte Spill
+ adcs r7, r7, r8
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ adcs r7, r7, r10
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r1, #28]
+ adcs r7, r5, r7
+ ldr r5, [r1, #32]
+ ldr r1, [r1, #36]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ ldr r2, [r2, #36]
+ adcs lr, r7, r5
+ adc r1, r2, r1
+ str lr, [sp, #36] @ 4-byte Spill
+ str r1, [sp, #40] @ 4-byte Spill
+ ldmib r3, {r1, r2, r12}
+ ldr r7, [r3, #20]
+ ldr r8, [r3]
+ ldr r10, [sp, #32] @ 4-byte Reload
+ ldr r5, [r3, #16]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r3, #24]
+ subs r8, r9, r8
+ sbcs r1, r11, r1
+ ldr r11, [r3, #32]
+ sbcs r2, r10, r2
+ sbcs r12, r6, r12
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r3, #28]
+ ldr r3, [r3, #36]
+ sbcs r6, r4, r5
+ ldr r4, [sp, #4] @ 4-byte Reload
+ ldr r5, [sp, #8] @ 4-byte Reload
+ str r3, [sp] @ 4-byte Spill
+ ldr r3, [sp, #52] @ 4-byte Reload
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [sp, #12] @ 4-byte Reload
+ sbcs r3, r3, r4
+ ldr r4, [sp, #48] @ 4-byte Reload
+ sbcs r4, r4, r5
+ ldr r5, [sp, #44] @ 4-byte Reload
+ sbcs r9, r5, r7
+ ldr r7, [sp, #40] @ 4-byte Reload
+ ldr r5, [sp] @ 4-byte Reload
+ sbcs r11, lr, r11
+ sbc lr, r7, r5
+ ldr r5, [sp, #16] @ 4-byte Reload
+ asr r7, lr, #31
+ cmp r7, #0
+ movlt r2, r10
+ movlt r8, r5
+ ldr r5, [sp, #20] @ 4-byte Reload
+ str r8, [r0]
+ movlt r1, r5
+ cmp r7, #0
+ str r1, [r0, #4]
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r2, [r0, #8]
+ movlt r12, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r12, [r0, #12]
+ movlt r6, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r6, [r0, #16]
+ movlt r3, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ cmp r7, #0
+ str r3, [r0, #20]
+ movlt r4, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r4, [r0, #24]
+ movlt r9, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r9, [r0, #28]
+ movlt r11, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ cmp r7, #0
+ str r11, [r0, #32]
+ movlt lr, r1
+ str lr, [r0, #36]
+ add sp, sp, #56
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end153:
+ .size mcl_fp_addNF10L, .Lfunc_end153-mcl_fp_addNF10L
+ .cantunwind
+ .fnend
+
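+@ Annotation, inferred from the code below: mcl_fp_sub10L stores r1 - r2 to r0
+@ as 10 limbs; when the subtraction borrows it falls into the carry block and
+@ adds the modulus at r3 back onto the stored result.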
+ .globl mcl_fp_sub10L
+ .align 2
+ .type mcl_fp_sub10L,%function
+mcl_fp_sub10L: @ @mcl_fp_sub10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #36
+ sub sp, sp, #36
+ ldm r2, {r12, lr}
+ ldr r8, [r2, #8]
+ ldr r10, [r2, #12]
+ ldm r1, {r4, r5, r6, r7}
+ subs r4, r4, r12
+ ldr r12, [r1, #36]
+ sbcs r9, r5, lr
+ ldr r5, [r2, #20]
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ sbcs lr, r6, r8
+ ldr r6, [r2, #16]
+ sbcs r8, r7, r10
+ ldr r7, [r1, #16]
+ sbcs r10, r7, r6
+ ldr r6, [r1, #20]
+ sbcs r7, r6, r5
+ ldr r5, [r1, #24]
+ ldr r6, [r1, #32]
+ str r7, [sp, #28] @ 4-byte Spill
+ sbcs r11, r5, r4
+ ldr r4, [r2, #28]
+ ldr r5, [r1, #28]
+ sbcs r5, r5, r4
+ ldr r4, [r2, #32]
+ ldr r2, [r2, #36]
+ sbcs r1, r6, r4
+ mov r6, #0
+ sbcs r2, r12, r2
+ ldr r12, [sp, #32] @ 4-byte Reload
+ sbc r6, r6, #0
+ tst r6, #1
+ str r12, [r0]
+ stmib r0, {r9, lr}
+ str r8, [r0, #12]
+ str r10, [r0, #16]
+ str r7, [r0, #20]
+ mov r7, r11
+ str r7, [r0, #24]
+ str r5, [r0, #28]
+ str r1, [r0, #32]
+ str r2, [r0, #36]
+ beq .LBB154_2
+@ BB#1: @ %carry
+ ldr r4, [r3, #32]
+ str r4, [sp, #20] @ 4-byte Spill
+ ldr r4, [r3, #36]
+ str r4, [sp, #24] @ 4-byte Spill
+ ldmib r3, {r4, r11}
+ ldr r6, [r3, #12]
+ str r6, [sp] @ 4-byte Spill
+ ldr r6, [r3, #16]
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [r3, #20]
+ str r6, [sp, #8] @ 4-byte Spill
+ ldr r6, [r3, #24]
+ str r6, [sp, #12] @ 4-byte Spill
+ ldr r6, [r3, #28]
+ ldr r3, [r3]
+ adds r3, r3, r12
+ str r6, [sp, #16] @ 4-byte Spill
+ adcs r4, r4, r9
+ stm r0, {r3, r4}
+ adcs r3, r11, lr
+ str r3, [r0, #8]
+ ldr r3, [sp] @ 4-byte Reload
+ ldr r6, [sp, #8] @ 4-byte Reload
+ adcs r3, r3, r8
+ str r3, [r0, #12]
+ ldr r3, [sp, #4] @ 4-byte Reload
+ adcs r3, r3, r10
+ str r3, [r0, #16]
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r3, r6, r3
+ str r3, [r0, #20]
+ ldr r3, [sp, #12] @ 4-byte Reload
+ adcs r3, r3, r7
+ str r3, [r0, #24]
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adcs r3, r3, r5
+ str r3, [r0, #28]
+ ldr r3, [sp, #20] @ 4-byte Reload
+ adcs r1, r3, r1
+ ldr r3, [sp, #24] @ 4-byte Reload
+ str r1, [r0, #32]
+ adc r2, r3, r2
+ str r2, [r0, #36]
+.LBB154_2: @ %nocarry
+ add sp, sp, #36
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end154:
+ .size mcl_fp_sub10L, .Lfunc_end154-mcl_fp_sub10L
+ .cantunwind
+ .fnend
+
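+@ Annotation, inferred from the code below: mcl_fp_subNF10L computes r1 - r2
+@ and r1 - r2 + p (modulus at r3) for 10 limbs and picks between them per limb
+@ with movge, keyed on the sign of the high word of the difference.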
+ .globl mcl_fp_subNF10L
+ .align 2
+ .type mcl_fp_subNF10L,%function
+mcl_fp_subNF10L: @ @mcl_fp_subNF10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #64
+ sub sp, sp, #64
+ mov r12, r0
+ ldr r0, [r2, #32]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r2, #36]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldm r2, {r4, r5}
+ ldr r0, [r2, #8]
+ ldr r7, [r2, #16]
+ ldr r8, [r2, #20]
+ ldr lr, [r1, #12]
+ ldr r6, [r1, #16]
+ ldr r11, [r1, #20]
+ ldr r9, [r1, #24]
+ ldr r10, [r1, #28]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r2, #12]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r2, #24]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r2, #28]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #8]
+ ldm r1, {r1, r2}
+ subs r1, r1, r4
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ sbcs r2, r2, r5
+ str r2, [sp, #16] @ 4-byte Spill
+ sbcs r4, r0, r1
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r4, [sp, #20] @ 4-byte Spill
+ sbcs r5, lr, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ sbcs r7, r6, r7
+ ldr r6, [sp, #44] @ 4-byte Reload
+ str r5, [sp, #28] @ 4-byte Spill
+ sbcs lr, r11, r8
+ str r7, [sp, #32] @ 4-byte Spill
+ str lr, [sp, #36] @ 4-byte Spill
+ sbcs r8, r9, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r8, [sp, #48] @ 4-byte Spill
+ sbcs r9, r10, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ str r9, [sp, #56] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ sbc r1, r6, r1
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [r3, #32]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldmib r3, {r1, r6}
+ ldr r11, [r3, #24]
+ ldr r10, [sp, #24] @ 4-byte Reload
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [r3, #12]
+ str r6, [sp] @ 4-byte Spill
+ ldr r6, [r3, #16]
+ str r6, [sp, #8] @ 4-byte Spill
+ ldr r6, [r3, #20]
+ str r6, [sp, #12] @ 4-byte Spill
+ ldr r6, [r3, #28]
+ ldr r3, [r3]
+ adds r3, r10, r3
+ adcs r1, r2, r1
+ ldr r2, [sp, #4] @ 4-byte Reload
+ adcs r2, r4, r2
+ ldr r4, [sp] @ 4-byte Reload
+ adcs r4, r5, r4
+ ldr r5, [sp, #8] @ 4-byte Reload
+ adcs r5, r7, r5
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r7, lr, r7
+ adcs r11, r8, r11
+ adcs r8, r9, r6
+ ldr r6, [sp, #40] @ 4-byte Reload
+ adcs r9, r0, r6
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r6, [sp, #44] @ 4-byte Reload
+ asr lr, r0, #31
+ adc r6, r0, r6
+ cmp lr, #0
+ movge r3, r10
+ str r3, [r12]
+ ldr r3, [sp, #16] @ 4-byte Reload
+ movge r1, r3
+ str r1, [r12, #4]
+ ldr r1, [sp, #20] @ 4-byte Reload
+ movge r2, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ cmp lr, #0
+ str r2, [r12, #8]
+ movge r4, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r4, [r12, #12]
+ movge r5, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r5, [r12, #16]
+ movge r7, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ cmp lr, #0
+ str r7, [r12, #20]
+ movge r11, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r11, [r12, #24]
+ movge r8, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r8, [r12, #28]
+ movge r9, r1
+ cmp lr, #0
+ movge r6, r0
+ str r9, [r12, #32]
+ str r6, [r12, #36]
+ add sp, sp, #64
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end155:
+ .size mcl_fp_subNF10L, .Lfunc_end155-mcl_fp_subNF10L
+ .cantunwind
+ .fnend
+
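+@ Annotation, inferred from the code below: mcl_fpDbl_add10L adds two 20-limb
+@ (double-width) values; the low 10 limbs of the sum are stored to r0 as-is,
+@ and the high 10 limbs are conditionally reduced by the modulus at r3 before
+@ being stored at offsets #40..#76.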
+ .globl mcl_fpDbl_add10L
+ .align 2
+ .type mcl_fpDbl_add10L,%function
+mcl_fpDbl_add10L: @ @mcl_fpDbl_add10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #104
+ sub sp, sp, #104
+ ldm r1, {r7, r9}
+ ldr r8, [r1, #8]
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r10}
+ add lr, r1, #16
+ adds r7, r4, r7
+ ldr r4, [r2, #16]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #100] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #92] @ 4-byte Spill
+ adcs r7, r5, r9
+ str r7, [sp, #28] @ 4-byte Spill
+ adcs r7, r6, r8
+ ldr r8, [r2, #20]
+ str r7, [sp, #24] @ 4-byte Spill
+ adcs r7, r10, r12
+ add r10, r1, #32
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ ldr r2, [r1, #64]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r7, [sp] @ 4-byte Spill
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldm r10, {r7, r9, r10}
+ ldr r2, [r1, #48]
+ ldr r5, [r1, #44]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #36] @ 4-byte Reload
+ ldr r6, [sp, #28] @ 4-byte Reload
+ adcs r1, r4, r1
+ str r11, [r0]
+ str r6, [r0, #4]
+ ldr r6, [sp, #24] @ 4-byte Reload
+ ldr r4, [sp, #32] @ 4-byte Reload
+ adcs r2, r8, r2
+ str r6, [r0, #8]
+ str r4, [r0, #12]
+ str r1, [r0, #16]
+ ldr r1, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r1, r1, r12
+ str r1, [r0, #24]
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r2, r2, lr
+ str r2, [r0, #28]
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [r0, #32]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r2, r2, r9
+ str r2, [r0, #36]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ adcs lr, r1, r10
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r4, r1, r5
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r4, [sp, #68] @ 4-byte Spill
+ adcs r12, r1, r2
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r12, [sp, #72] @ 4-byte Spill
+ adcs r5, r1, r2
+ ldr r1, [sp, #80] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r5, [sp, #76] @ 4-byte Spill
+ adcs r7, r1, r2
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r7, [sp, #80] @ 4-byte Spill
+ adcs r9, r1, r2
+ ldr r1, [sp, #88] @ 4-byte Reload
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r9, [sp, #84] @ 4-byte Spill
+ adcs r10, r1, r2
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r10, [sp, #64] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #92] @ 4-byte Spill
+ mov r1, #0
+ adc r1, r1, #0
+ str r1, [sp, #88] @ 4-byte Spill
+ ldmib r3, {r1, r2, r8}
+ ldr r6, [r3, #16]
+ ldr r11, [r3]
+ str r6, [sp, #48] @ 4-byte Spill
+ ldr r6, [r3, #20]
+ subs r11, lr, r11
+ sbcs r1, r4, r1
+ sbcs r2, r12, r2
+ sbcs r12, r5, r8
+ ldr r8, [r3, #32]
+ ldr r5, [r3, #36]
+ str r6, [sp, #52] @ 4-byte Spill
+ ldr r6, [r3, #24]
+ str r6, [sp, #56] @ 4-byte Spill
+ ldr r6, [r3, #28]
+ ldr r3, [sp, #48] @ 4-byte Reload
+ str r6, [sp, #60] @ 4-byte Spill
+ sbcs r6, r7, r3
+ ldr r3, [sp, #52] @ 4-byte Reload
+ ldr r4, [sp, #60] @ 4-byte Reload
+ sbcs r7, r9, r3
+ ldr r3, [sp, #56] @ 4-byte Reload
+ sbcs r9, r10, r3
+ ldr r3, [sp, #100] @ 4-byte Reload
+ sbcs r10, r3, r4
+ ldr r3, [sp, #96] @ 4-byte Reload
+ ldr r4, [sp, #68] @ 4-byte Reload
+ sbcs r8, r3, r8
+ ldr r3, [sp, #92] @ 4-byte Reload
+ sbcs r5, r3, r5
+ ldr r3, [sp, #88] @ 4-byte Reload
+ sbc r3, r3, #0
+ ands r3, r3, #1
+ movne r11, lr
+ movne r1, r4
+ str r11, [r0, #40]
+ str r1, [r0, #44]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ cmp r3, #0
+ str r2, [r0, #48]
+ movne r12, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r12, [r0, #52]
+ movne r6, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r6, [r0, #56]
+ movne r7, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ cmp r3, #0
+ str r7, [r0, #60]
+ movne r9, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r9, [r0, #64]
+ movne r10, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r10, [r0, #68]
+ movne r8, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ cmp r3, #0
+ str r8, [r0, #72]
+ movne r5, r1
+ str r5, [r0, #76]
+ add sp, sp, #104
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end156:
+ .size mcl_fpDbl_add10L, .Lfunc_end156-mcl_fpDbl_add10L
+ .cantunwind
+ .fnend
+
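+@ Annotation, inferred from the code below: mcl_fpDbl_sub10L subtracts two
+@ 20-limb values; the low 10 limbs of the difference are stored to r0 as-is,
+@ and the modulus at r3 is added back into the high 10 limbs when the overall
+@ subtraction borrowed.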
+ .globl mcl_fpDbl_sub10L
+ .align 2
+ .type mcl_fpDbl_sub10L,%function
+mcl_fpDbl_sub10L: @ @mcl_fpDbl_sub10L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #96
+ sub sp, sp, #96
+ ldr r7, [r2, #64]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #52] @ 4-byte Spill
+ ldm r2, {r6, r7, r8, r9}
+ ldm r1, {r12, lr}
+ ldr r4, [r1, #8]
+ ldr r10, [r2, #20]
+ ldr r5, [r1, #12]
+ subs r11, r12, r6
+ ldr r6, [r2, #28]
+ sbcs r7, lr, r7
+ add lr, r1, #16
+ sbcs r8, r4, r8
+ ldr r4, [r2, #16]
+ sbcs r5, r5, r9
+ ldr r9, [r1, #32]
+ str r6, [sp, #28] @ 4-byte Spill
+ ldr r6, [r2, #24]
+ ldr r2, [r1, #64]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r6, [sp, #24] @ 4-byte Spill
+ ldr r6, [r1, #44]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #36]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ str r11, [r0]
+ stmib r0, {r7, r8}
+ str r5, [r0, #12]
+ ldr r7, [sp] @ 4-byte Reload
+ ldr r8, [r3, #20]
+ sbcs r1, r1, r4
+ str r1, [r0, #16]
+ sbcs r2, r2, r10
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ sbcs r1, r12, r1
+ str r1, [r0, #24]
+ sbcs r2, lr, r2
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r2, [r0, #28]
+ ldr r2, [sp, #56] @ 4-byte Reload
+ sbcs r1, r9, r1
+ sbcs r2, r7, r2
+ str r1, [r0, #32]
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r2, [r0, #36]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ sbcs r12, r2, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r12, [sp, #48] @ 4-byte Spill
+ sbcs r4, r6, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ sbcs r11, r2, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r11, [sp, #52] @ 4-byte Spill
+ sbcs r6, r2, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r6, [sp, #64] @ 4-byte Spill
+ sbcs r7, r2, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r7, [sp, #68] @ 4-byte Spill
+ sbcs r9, r2, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r9, [sp, #76] @ 4-byte Spill
+ sbcs r1, r2, r1
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbcs r10, r2, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r10, [sp, #80] @ 4-byte Spill
+ sbcs lr, r2, r1
+ mov r1, #0
+ ldr r2, [r3, #4]
+ sbc r1, r1, #0
+ str lr, [sp, #84] @ 4-byte Spill
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [r3, #32]
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [r3, #8]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [r3, #12]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ ldr r5, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ ldr r3, [r3]
+ str r1, [sp, #44] @ 4-byte Spill
+ adds r1, r12, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r2, r4, r2
+ adcs r3, r11, r3
+ adcs r12, r6, r5
+ ldr r6, [sp, #36] @ 4-byte Reload
+ ldr r5, [sp, #92] @ 4-byte Reload
+ adcs r6, r7, r6
+ ldr r7, [sp, #40] @ 4-byte Reload
+ adcs r8, r9, r8
+ adcs r9, r5, r7
+ ldr r5, [sp, #44] @ 4-byte Reload
+ ldr r7, [sp, #88] @ 4-byte Reload
+ adcs r7, r7, r5
+ ldr r5, [sp, #56] @ 4-byte Reload
+ adcs r11, r10, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ adc r10, lr, r5
+ ldr r5, [sp, #72] @ 4-byte Reload
+ ands lr, r5, #1
+ ldr r5, [sp, #48] @ 4-byte Reload
+ moveq r2, r4
+ moveq r1, r5
+ str r1, [r0, #40]
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r2, [r0, #44]
+ moveq r3, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ cmp lr, #0
+ str r3, [r0, #48]
+ moveq r12, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r12, [r0, #52]
+ moveq r6, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r6, [r0, #56]
+ moveq r8, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ cmp lr, #0
+ str r8, [r0, #60]
+ moveq r9, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r9, [r0, #64]
+ moveq r7, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r7, [r0, #68]
+ moveq r11, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ cmp lr, #0
+ str r11, [r0, #72]
+ moveq r10, r1
+ str r10, [r0, #76]
+ add sp, sp, #96
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end157:
+ .size mcl_fpDbl_sub10L, .Lfunc_end157-mcl_fpDbl_sub10L
+ .cantunwind
+ .fnend
+
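+@ Annotation, inferred from the code below: .LmulPv352x32 is a local helper
+@ that multiplies the 11-limb (352-bit) vector at r1 by the 32-bit scalar in
+@ r2 and writes the 12-limb product to r0; the 11-limb routines that follow
+@ call it once per limb.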
+ .align 2
+ .type .LmulPv352x32,%function
+.LmulPv352x32: @ @mulPv352x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r3, [r1, #28]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #28]
+ ldr r3, [r1, #32]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #32]
+ ldr r3, [r1, #36]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #36]
+ ldr r1, [r1, #40]
+ umull r3, r7, r1, r2
+ adcs r1, r5, r3
+ str r1, [r0, #40]
+ adc r1, r7, #0
+ str r1, [r0, #44]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end158:
+ .size .LmulPv352x32, .Lfunc_end158-.LmulPv352x32
+ .cantunwind
+ .fnend
+
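+@ Annotation, inferred from the code below: mcl_fp_mulUnitPre11L multiplies an
+@ 11-limb operand by a 32-bit unit via .LmulPv352x32 into a stack buffer and
+@ copies the 12-limb result out to the destination pointer (r0, saved in r4).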
+ .globl mcl_fp_mulUnitPre11L
+ .align 2
+ .type mcl_fp_mulUnitPre11L,%function
+mcl_fp_mulUnitPre11L: @ @mcl_fp_mulUnitPre11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, lr}
+ .pad #48
+ sub sp, sp, #48
+ mov r4, r0
+ mov r0, sp
+ bl .LmulPv352x32(PLT)
+ ldr r12, [sp, #44]
+ ldr lr, [sp, #40]
+ ldr r8, [sp, #36]
+ ldr r9, [sp, #32]
+ ldr r10, [sp, #28]
+ ldr r1, [sp, #24]
+ ldr r5, [sp, #20]
+ ldr r6, [sp, #16]
+ ldr r7, [sp]
+ ldmib sp, {r2, r3}
+ ldr r0, [sp, #12]
+ str r7, [r4]
+ stmib r4, {r2, r3}
+ str r0, [r4, #12]
+ str r6, [r4, #16]
+ str r5, [r4, #20]
+ str r1, [r4, #24]
+ str r10, [r4, #28]
+ str r9, [r4, #32]
+ str r8, [r4, #36]
+ str lr, [r4, #40]
+ str r12, [r4, #44]
+ add sp, sp, #48
+ pop {r4, r5, r6, r7, r8, r9, r10, lr}
+ mov pc, lr
+.Lfunc_end159:
+ .size mcl_fp_mulUnitPre11L, .Lfunc_end159-mcl_fp_mulUnitPre11L
+ .cantunwind
+ .fnend
+
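+@ Annotation, inferred from the code below: mcl_fpDbl_mulPre11L computes the
+@ full 22-limb product of two 11-limb operands by schoolbook multiplication:
+@ one .LmulPv352x32 call per limb of the second operand, accumulating each
+@ partial product and emitting one low result limb per pass.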
+ .globl mcl_fpDbl_mulPre11L
+ .align 2
+ .type mcl_fpDbl_mulPre11L,%function
+mcl_fpDbl_mulPre11L: @ @mcl_fpDbl_mulPre11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #604
+ sub sp, sp, #604
+ mov r3, r2
+ mov r4, r0
+ add r0, sp, #552
+ str r1, [sp, #68] @ 4-byte Spill
+ mov r5, r1
+ ldr r2, [r3]
+ str r3, [sp, #64] @ 4-byte Spill
+ str r4, [sp, #60] @ 4-byte Spill
+ mov r6, r3
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #596]
+ ldr r1, [sp, #560]
+ ldr r2, [r6, #4]
+ ldr r11, [sp, #556]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #592]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #564]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #588]
+ str r1, [sp, #20] @ 4-byte Spill
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #576]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #572]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #568]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #552]
+ str r0, [r4]
+ add r0, sp, #504
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #548]
+ add r10, sp, #532
+ add r12, sp, #508
+ mov r6, r4
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r1, [sp, #504]
+ ldr lr, [sp, #528]
+ ldr r7, [sp, #524]
+ ldm r12, {r0, r2, r3, r12}
+ adds r1, r1, r11
+ str r1, [r4, #4]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r2, [r5, #8]
+ adcs r0, r8, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #456
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #500]
+ add r10, sp, #484
+ add r12, sp, #460
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #496]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr lr, [sp, #480]
+ ldr r7, [sp, #476]
+ ldr r1, [sp, #456]
+ ldm r12, {r0, r2, r3, r12}
+ ldr r11, [sp, #16] @ 4-byte Reload
+ adds r1, r1, r11
+ str r1, [r6, #8]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #12]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #408
+ bl .LmulPv352x32(PLT)
+ add r10, sp, #444
+ add lr, sp, #432
+ add r12, sp, #412
+ ldm r10, {r8, r9, r10}
+ ldm lr, {r6, r11, lr}
+ ldr r7, [sp, #428]
+ ldr r1, [sp, #408]
+ ldm r12, {r0, r2, r3, r12}
+ ldr r4, [sp, #16] @ 4-byte Reload
+ adds r1, r1, r4
+ ldr r4, [sp, #60] @ 4-byte Reload
+ str r1, [r4, #12]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #16]
+ ldr r5, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r3, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #360
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #404]
+ add r10, sp, #392
+ add r12, sp, #364
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr lr, [sp, #388]
+ ldr r6, [sp, #384]
+ ldr r7, [sp, #380]
+ ldr r1, [sp, #360]
+ ldm r12, {r0, r2, r3, r12}
+ ldr r11, [sp, #16] @ 4-byte Reload
+ adds r1, r1, r11
+ str r1, [r4, #16]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r4, #20]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #312
+ bl .LmulPv352x32(PLT)
+ add r11, sp, #344
+ add r12, sp, #316
+ ldm r11, {r8, r9, r10, r11}
+ ldr lr, [sp, #340]
+ ldr r6, [sp, #336]
+ ldr r7, [sp, #332]
+ ldr r1, [sp, #312]
+ ldm r12, {r0, r2, r3, r12}
+ ldr r5, [sp, #16] @ 4-byte Reload
+ adds r1, r1, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ str r1, [r5, #20]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r4, #24]
+ ldr r4, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ mov r1, r4
+ adcs r0, r3, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #264
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #308]
+ add r10, sp, #296
+ add r12, sp, #268
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr lr, [sp, #292]
+ ldr r6, [sp, #288]
+ ldr r7, [sp, #284]
+ ldr r1, [sp, #264]
+ ldm r12, {r0, r2, r3, r12}
+ ldr r11, [sp, #16] @ 4-byte Reload
+ adds r1, r1, r11
+ str r1, [r5, #24]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #28]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #216
+ bl .LmulPv352x32(PLT)
+ add r10, sp, #252
+ add lr, sp, #240
+ add r12, sp, #220
+ ldm r10, {r8, r9, r10}
+ ldm lr, {r6, r11, lr}
+ ldr r7, [sp, #236]
+ ldr r1, [sp, #216]
+ ldm r12, {r0, r2, r3, r12}
+ ldr r4, [sp, #16] @ 4-byte Reload
+ adds r1, r1, r4
+ ldr r4, [sp, #60] @ 4-byte Reload
+ str r1, [r4, #28]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #32]
+ ldr r5, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mov r1, r5
+ adcs r0, r3, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #168
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #212]
+ add r10, sp, #200
+ add r12, sp, #172
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr lr, [sp, #196]
+ ldr r6, [sp, #192]
+ ldr r7, [sp, #188]
+ ldr r1, [sp, #168]
+ ldm r12, {r0, r2, r3, r12}
+ ldr r11, [sp, #12] @ 4-byte Reload
+ adds r1, r1, r11
+ ldr r11, [sp, #64] @ 4-byte Reload
+ str r1, [r4, #32]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r11, #36]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #120
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #164]
+ add lr, sp, #152
+ add r10, sp, #140
+ add r8, sp, #128
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm lr, {r9, r12, lr}
+ ldm r10, {r0, r6, r10}
+ ldr r2, [sp, #120]
+ ldr r3, [sp, #124]
+ ldm r8, {r1, r7, r8}
+ ldr r5, [sp, #12] @ 4-byte Reload
+ adds r2, r2, r5
+ ldr r5, [sp, #56] @ 4-byte Reload
+ str r2, [r4, #36]
+ ldr r2, [r11, #40]
+ adcs r11, r3, r5
+ ldr r3, [sp, #52] @ 4-byte Reload
+ adcs r5, r1, r3
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r7, r7, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r8, r8, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r10, r10, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #72
+ bl .LmulPv352x32(PLT)
+ add r3, sp, #72
+ ldm r3, {r0, r1, r2, r3}
+ ldr r9, [sp, #116]
+ ldr r6, [sp, #112]
+ adds r12, r0, r11
+ add r11, sp, #88
+ adcs lr, r1, r5
+ adcs r2, r2, r7
+ adcs r3, r3, r8
+ ldr r8, [sp, #108]
+ ldm r11, {r0, r1, r5, r7, r11}
+ str r12, [r4, #40]
+ str lr, [r4, #44]
+ str r2, [r4, #48]
+ ldr r2, [sp, #40] @ 4-byte Reload
+ add r12, r4, #72
+ str r3, [r4, #52]
+ adcs r0, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r0, [r4, #56]
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r0, r5, r10
+ str r1, [r4, #60]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [r4, #64]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [r4, #68]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r11, r0
+ adcs r1, r8, r1
+ adcs r2, r6, r2
+ adc r3, r9, #0
+ stm r12, {r0, r1, r2, r3}
+ add sp, sp, #604
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end160:
+ .size mcl_fpDbl_mulPre11L, .Lfunc_end160-mcl_fpDbl_mulPre11L
+ .cantunwind
+ .fnend
+
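+@ Annotation, inferred from the code below: mcl_fpDbl_sqrPre11L is the
+@ squaring counterpart of mcl_fpDbl_mulPre11L, running the same per-limb
+@ .LmulPv352x32 accumulation with both operands taken from r1.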
+ .globl mcl_fpDbl_sqrPre11L
+ .align 2
+ .type mcl_fpDbl_sqrPre11L,%function
+mcl_fpDbl_sqrPre11L: @ @mcl_fpDbl_sqrPre11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #596
+ sub sp, sp, #596
+ mov r5, r1
+ mov r4, r0
+ add r0, sp, #544
+ ldr r2, [r5]
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #588]
+ ldr r1, [sp, #548]
+ ldr r2, [r5, #4]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #552]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #556]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #576]
+ str r1, [sp, #24] @ 4-byte Spill
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #572]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #568]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #564]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #560]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #544]
+ str r0, [r4]
+ add r0, sp, #496
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #540]
+ add r10, sp, #520
+ add lr, sp, #496
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #20] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #4]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #8]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #448
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #492]
+ add r10, sp, #476
+ add lr, sp, #448
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r6, r8, r9, r10}
+ ldr r7, [sp, #472]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #20] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #8]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #12]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #400
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #444]
+ add r10, sp, #428
+ add lr, sp, #400
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r6, r8, r9, r10}
+ ldr r7, [sp, #424]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #20] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #12]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #16]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #352
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #396]
+ add r10, sp, #380
+ add lr, sp, #352
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r6, r8, r9, r10}
+ ldr r7, [sp, #376]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #20] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #16]
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #20]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #304
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #348]
+ add r10, sp, #332
+ add lr, sp, #304
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r6, r8, r9, r10}
+ ldr r7, [sp, #328]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #20] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #20]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #24]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #256
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #300]
+ add r10, sp, #284
+ add lr, sp, #256
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r6, r8, r9, r10}
+ ldr r7, [sp, #280]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #20] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #24]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #28]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #208
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #252]
+ add r10, sp, #236
+ add lr, sp, #208
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r6, r8, r9, r10}
+ ldr r7, [sp, #232]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #20] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #28]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #32]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #160
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #204]
+ add r10, sp, #188
+ add lr, sp, #160
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r6, r8, r9, r10}
+ ldr r7, [sp, #184]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #16] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #32]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #36]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #112
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #156]
+ add lr, sp, #140
+ add r12, sp, #124
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #152]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r8, r11, lr}
+ ldr r9, [sp, #136]
+ ldr r2, [sp, #112]
+ ldr r7, [sp, #116]
+ ldr r6, [sp, #120]
+ ldm r12, {r0, r3, r12}
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adds r2, r2, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r2, [r4, #36]
+ ldr r2, [r5, #40]
+ adcs r7, r7, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r6, r6, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r11, r11, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #64
+ bl .LmulPv352x32(PLT)
+ add r3, sp, #64
+ ldm r3, {r0, r1, r2, r3}
+ ldr r9, [sp, #108]
+ ldr r8, [sp, #104]
+ adds r12, r0, r7
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs lr, r1, r6
+ adcs r2, r2, r10
+ add r10, sp, #80
+ adcs r3, r3, r0
+ ldm r10, {r0, r1, r5, r6, r7, r10}
+ str r12, [r4, #40]
+ str lr, [r4, #44]
+ str r2, [r4, #48]
+ ldr r2, [sp, #20] @ 4-byte Reload
+ add r12, r4, #72
+ str r3, [r4, #52]
+ adcs r0, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r0, [r4, #56]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r1, [r4, #60]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [r4, #64]
+ adcs r0, r6, r11
+ str r0, [r4, #68]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r7, r0
+ adcs r1, r10, r1
+ adcs r2, r8, r2
+ adc r3, r9, #0
+ stm r12, {r0, r1, r2, r3}
+ add sp, sp, #596
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end161:
+ .size mcl_fpDbl_sqrPre11L, .Lfunc_end161-mcl_fpDbl_sqrPre11L
+ .cantunwind
+ .fnend
+
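+@ Annotation, inferred from the code below: mcl_fp_mont11L performs an 11-limb
+@ Montgomery multiplication; it loads the Montgomery constant from [r3, #-4]
+@ and interleaves .LmulPv352x32 calls for the multiply and reduction steps,
+@ one pair of passes per limb of the multiplier.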
+ .globl mcl_fp_mont11L
+ .align 2
+ .type mcl_fp_mont11L,%function
+mcl_fp_mont11L: @ @mcl_fp_mont11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #132
+ sub sp, sp, #132
+ .pad #1024
+ sub sp, sp, #1024
+ mov r7, r2
+ ldr r5, [r3, #-4]
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, sp, #1104
+ str r3, [sp, #92] @ 4-byte Spill
+ str r1, [sp, #84] @ 4-byte Spill
+ mov r4, r3
+ mov r6, r1
+ ldr r2, [r7]
+ str r7, [sp, #80] @ 4-byte Spill
+ str r5, [sp, #88] @ 4-byte Spill
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #1108]
+ ldr r8, [sp, #1104]
+ mov r1, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1112]
+ mul r2, r8, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1116]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #1140]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1132]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1128]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1124]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1120]
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #1056
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #1100]
+ ldr r2, [r7, #4]
+ ldr r11, [sp, #1072]
+ ldr r5, [sp, #1056]
+ ldr r4, [sp, #1060]
+ ldr r10, [sp, #1064]
+ ldr r9, [sp, #1068]
+ mov r1, r6
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1096]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1092]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1088]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1084]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1080]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1076]
+ str r0, [sp, #4] @ 4-byte Spill
+ add r0, sp, #1008
+ bl .LmulPv352x32(PLT)
+ adds r0, r5, r8
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ add lr, sp, #1008
+ ldr r7, [sp, #1044]
+ ldr r6, [sp, #1040]
+ ldr r5, [sp, #1036]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r8, r4, r0
+ mov r0, #0
+ ldr r4, [sp, #1032]
+ adcs r1, r10, r1
+ ldr r10, [sp, #1052]
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r9, r1
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r11, r1
+ ldr r11, [sp, #1048]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r2, r1
+ str r1, [sp, #28] @ 4-byte Spill
+ adc r9, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r8, r8, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r9, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, sp, #960
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #1004]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r5, [sp, #984]
+ ldr r6, [sp, #980]
+ ldr r9, [sp, #976]
+ ldr r10, [sp, #960]
+ ldr r11, [sp, #964]
+ ldr r7, [sp, #968]
+ ldr r4, [sp, #972]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1000]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #996]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #992]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #988]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, sp, #912
+ bl .LmulPv352x32(PLT)
+ adds r0, r8, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #916
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #940
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldm r11, {r5, r6, r7, r8, r11}
+ ldr r4, [sp, #912]
+ adc r10, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r9, [sp, #76] @ 4-byte Reload
+ adds r9, r9, r4
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r10, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r9, r0
+ add r0, sp, #864
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #908]
+ add r11, sp, #864
+ ldr r7, [sp, #888]
+ ldr r5, [sp, #884]
+ ldr r8, [sp, #880]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #904]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #900]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #896]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r6, [sp, #876]
+ ldr r2, [r0, #12]
+ add r0, sp, #816
+ bl .LmulPv352x32(PLT)
+ adds r0, r4, r9
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #816
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #840
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adds r8, r7, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, sp, #768
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #812]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r5, [sp, #792]
+ ldr r6, [sp, #788]
+ ldr r9, [sp, #784]
+ ldr r10, [sp, #768]
+ ldr r11, [sp, #772]
+ ldr r7, [sp, #776]
+ ldr r4, [sp, #780]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, sp, #720
+ bl .LmulPv352x32(PLT)
+ adds r0, r8, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #724
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #748
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldm r11, {r5, r6, r7, r8, r11}
+ ldr r4, [sp, #720]
+ adc r10, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r9, [sp, #76] @ 4-byte Reload
+ adds r9, r9, r4
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r10, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r9, r0
+ add r0, sp, #672
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #716]
+ add r11, sp, #672
+ ldr r7, [sp, #696]
+ ldr r5, [sp, #692]
+ ldr r8, [sp, #688]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #712]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r6, [sp, #684]
+ ldr r2, [r0, #20]
+ add r0, sp, #624
+ bl .LmulPv352x32(PLT)
+ adds r0, r4, r9
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #624
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #648
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adds r8, r7, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, sp, #576
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #620]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r5, [sp, #600]
+ ldr r6, [sp, #596]
+ ldr r9, [sp, #592]
+ ldr r10, [sp, #576]
+ ldr r11, [sp, #580]
+ ldr r7, [sp, #584]
+ ldr r4, [sp, #588]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #616]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #612]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #608]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #604]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #528
+ bl .LmulPv352x32(PLT)
+ adds r0, r8, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #532
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #556
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldm r11, {r5, r6, r7, r8, r11}
+ ldr r4, [sp, #528]
+ adc r10, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r9, [sp, #76] @ 4-byte Reload
+ adds r9, r9, r4
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r10, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r9, r0
+ add r0, sp, #480
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #524]
+ add r11, sp, #480
+ ldr r7, [sp, #504]
+ ldr r5, [sp, #500]
+ ldr r8, [sp, #496]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #520]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #516]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #512]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r6, [sp, #492]
+ ldr r2, [r0, #28]
+ add r0, sp, #432
+ bl .LmulPv352x32(PLT)
+ adds r0, r4, r9
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #432
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #456
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adds r8, r7, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, sp, #384
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #428]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r5, [sp, #408]
+ ldr r6, [sp, #404]
+ ldr r9, [sp, #400]
+ ldr r10, [sp, #384]
+ ldr r11, [sp, #388]
+ ldr r7, [sp, #392]
+ ldr r4, [sp, #396]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #424]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #420]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #336
+ bl .LmulPv352x32(PLT)
+ adds r0, r8, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #340
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #364
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldm r11, {r5, r6, r7, r8, r11}
+ ldr r4, [sp, #336]
+ adc r10, r0, #0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r9, [sp, #76] @ 4-byte Reload
+ adds r9, r9, r4
+ ldr r4, [sp, #68] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ adcs r0, r10, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r9, r0
+ add r0, sp, #288
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #332]
+ add r11, sp, #288
+ ldr r7, [sp, #312]
+ ldr r5, [sp, #308]
+ ldr r8, [sp, #304]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #328]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #324]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #320]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #316]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r6, [sp, #300]
+ ldr r2, [r0, #36]
+ add r0, sp, #240
+ bl .LmulPv352x32(PLT)
+ adds r0, r4, r9
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #240
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #264
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adds r8, r7, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, sp, #192
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #236]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r5, [sp, #216]
+ ldr r6, [sp, #212]
+ ldr r9, [sp, #208]
+ ldr r10, [sp, #192]
+ ldr r11, [sp, #196]
+ ldr r7, [sp, #200]
+ ldr r4, [sp, #204]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #232]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #228]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #224]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #220]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #144
+ bl .LmulPv352x32(PLT)
+ adds r0, r8, r10
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ add lr, sp, #144
+ add r12, sp, #160
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r11
+ adcs r10, r1, r7
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r11, r1, r4
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r9
+ add r9, sp, #180
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #48] @ 4-byte Spill
+ ldm lr, {r2, r6, lr}
+ ldr r5, [sp, #156]
+ adds r4, r0, r2
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r6, r10, r6
+ mul r1, r4, r0
+ str r1, [sp, #44] @ 4-byte Spill
+ ldm r9, {r7, r8, r9}
+ ldm r12, {r0, r1, r2, r3, r12}
+ str r6, [sp, #40] @ 4-byte Spill
+ adcs r6, r11, lr
+ ldr r10, [sp, #92] @ 4-byte Reload
+ str r6, [sp, #36] @ 4-byte Spill
+ ldr r6, [sp, #84] @ 4-byte Reload
+ adcs r11, r6, r5
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adcs r6, r6, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r5, r0, r3
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r8, r0, r9
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ add r0, sp, #96
+ bl .LmulPv352x32(PLT)
+ add r7, sp, #96
+ ldm r7, {r0, r1, r3, r7}
+ adds r0, r4, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs lr, r0, r1
+ ldr r0, [sp, #36] @ 4-byte Reload
+ str lr, [sp, #44] @ 4-byte Spill
+ adcs r1, r0, r3
+ ldr r3, [sp, #112]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r9, r11, r7
+ str r1, [sp, #48] @ 4-byte Spill
+ adcs r6, r6, r3
+ ldr r3, [sp, #116]
+ str r6, [sp, #52] @ 4-byte Spill
+ adcs r0, r0, r3
+ ldr r3, [sp, #120]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r12, r0, r3
+ ldr r3, [sp, #124]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r12, [sp, #56] @ 4-byte Spill
+ adcs r5, r5, r3
+ ldr r3, [sp, #128]
+ str r5, [sp, #60] @ 4-byte Spill
+ adcs r0, r0, r3
+ ldr r3, [sp, #132]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ ldr r3, [sp, #136]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ ldr r3, [sp, #140]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r8, r8, r3
+ adc r0, r0, #0
+ str r8, [sp, #68] @ 4-byte Spill
+ str r0, [sp, #64] @ 4-byte Spill
+ ldmib r10, {r3, r7}
+ ldr r4, [r10, #16]
+ ldr r11, [r10]
+ ldr r2, [r10, #12]
+ mov r0, r10
+ str r4, [sp, #28] @ 4-byte Spill
+ ldr r4, [r10, #20]
+ subs r11, lr, r11
+ ldr lr, [sp, #84] @ 4-byte Reload
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [r10, #24]
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r10, #28]
+ sbcs r10, r1, r3
+ mov r3, r9
+ ldr r9, [r0, #32]
+ sbcs r1, r3, r7
+ ldr r7, [r0, #36]
+ ldr r0, [r0, #40]
+ sbcs r2, r6, r2
+ ldr r6, [sp, #36] @ 4-byte Reload
+ str r4, [sp, #40] @ 4-byte Spill
+ ldr r4, [sp, #28] @ 4-byte Reload
+ sbcs lr, lr, r4
+ ldr r4, [sp, #32] @ 4-byte Reload
+ sbcs r4, r12, r4
+ ldr r12, [sp, #88] @ 4-byte Reload
+ sbcs r5, r5, r6
+ ldr r6, [sp, #40] @ 4-byte Reload
+ sbcs r12, r12, r6
+ ldr r6, [sp, #80] @ 4-byte Reload
+ sbcs r9, r6, r9
+ ldr r6, [sp, #76] @ 4-byte Reload
+ sbcs r7, r6, r7
+ ldr r6, [sp, #64] @ 4-byte Reload
+ sbcs r0, r8, r0
+ ldr r8, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ sbc r6, r6, #0
+ ands r6, r6, #1
+ movne r11, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ movne r1, r3
+ str r11, [r8]
+ movne r10, r0
+ cmp r6, #0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r10, [r8, #4]
+ str r1, [r8, #8]
+ ldr r1, [sp, #52] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r2, [r8, #12]
+ movne lr, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str lr, [r8, #16]
+ movne r4, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ cmp r6, #0
+ str r4, [r8, #20]
+ movne r5, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r5, [r8, #24]
+ movne r12, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r12, [r8, #28]
+ movne r9, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ cmp r6, #0
+ str r9, [r8, #32]
+ movne r7, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r7, [r8, #36]
+ movne r0, r1
+ str r0, [r8, #40]
+ add sp, sp, #132
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end162:
+ .size mcl_fp_mont11L, .Lfunc_end162-mcl_fp_mont11L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF11L
+ .align 2
+ .type mcl_fp_montNF11L,%function
+mcl_fp_montNF11L: @ @mcl_fp_montNF11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #124
+ sub sp, sp, #124
+ .pad #1024
+ sub sp, sp, #1024
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ add r6, sp, #1024
+ str r0, [sp, #68] @ 4-byte Spill
+ str r3, [sp, #84] @ 4-byte Spill
+ str r1, [sp, #76] @ 4-byte Spill
+ mov r4, r3
+ add r0, r6, #72
+ str r5, [sp, #80] @ 4-byte Spill
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #1100]
+ ldr r10, [sp, #1096]
+ add r9, sp, #1024
+ mov r1, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1104]
+ mul r2, r10, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1108]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1140]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1132]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1128]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1124]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1120]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1116]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1112]
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, r9, #24
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #1092]
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r5, [sp, #1072]
+ ldr r7, [sp, #1068]
+ ldr r8, [sp, #1064]
+ ldr r11, [sp, #1048]
+ ldr r4, [sp, #1052]
+ ldr r6, [sp, #1056]
+ ldr r9, [sp, #1060]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1088]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1084]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1080]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1076]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, sp, #1000
+ bl .LmulPv352x32(PLT)
+ adds r0, r11, r10
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add r11, sp, #1024
+ add lr, sp, #1000
+ ldr r10, [sp, #1044]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldm r11, {r4, r5, r6, r8, r11}
+ adc r9, r1, r0
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #952
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #996]
+ add r11, sp, #952
+ ldr r6, [sp, #976]
+ ldr r4, [sp, #972]
+ ldr r8, [sp, #968]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #992]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #988]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #984]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #980]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r5, [sp, #964]
+ ldr r2, [r0, #8]
+ add r0, sp, #904
+ bl .LmulPv352x32(PLT)
+ adds r0, r7, r9
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #908
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #948]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #932
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r5, r6, r9, r11}
+ ldr r4, [sp, #904]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r8, [sp, #64] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adds r4, r8, r4
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #856
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #900]
+ add r11, sp, #856
+ ldr r7, [sp, #880]
+ ldr r5, [sp, #876]
+ ldr r8, [sp, #872]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #896]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #888]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #884]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r6, [sp, #868]
+ ldr r2, [r0, #12]
+ add r0, sp, #808
+ bl .LmulPv352x32(PLT)
+ adds r0, r4, r9
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #808
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #852]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #832
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r4, r5, r6, r8, r11}
+ adc r9, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #760
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #804]
+ add r11, sp, #760
+ ldr r6, [sp, #784]
+ ldr r4, [sp, #780]
+ ldr r8, [sp, #776]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r5, [sp, #772]
+ ldr r2, [r0, #16]
+ add r0, sp, #712
+ bl .LmulPv352x32(PLT)
+ adds r0, r7, r9
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #716
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #756]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #740
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r5, r6, r9, r11}
+ ldr r4, [sp, #712]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r8, [sp, #64] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adds r4, r8, r4
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #664
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #708]
+ add r11, sp, #664
+ ldr r7, [sp, #688]
+ ldr r5, [sp, #684]
+ ldr r8, [sp, #680]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r6, [sp, #676]
+ ldr r2, [r0, #20]
+ add r0, sp, #616
+ bl .LmulPv352x32(PLT)
+ adds r0, r4, r9
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #616
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #660]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #640
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r4, r5, r6, r8, r11}
+ adc r9, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #568
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #612]
+ add r11, sp, #568
+ ldr r6, [sp, #592]
+ ldr r4, [sp, #588]
+ ldr r8, [sp, #584]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #608]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #604]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #600]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #596]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r5, [sp, #580]
+ ldr r2, [r0, #24]
+ add r0, sp, #520
+ bl .LmulPv352x32(PLT)
+ adds r0, r7, r9
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #524
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #564]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #548
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r5, r6, r9, r11}
+ ldr r4, [sp, #520]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r8, [sp, #64] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adds r4, r8, r4
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #472
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #516]
+ add r11, sp, #472
+ ldr r7, [sp, #496]
+ ldr r5, [sp, #492]
+ ldr r8, [sp, #488]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #512]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #504]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #500]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r6, [sp, #484]
+ ldr r2, [r0, #28]
+ add r0, sp, #424
+ bl .LmulPv352x32(PLT)
+ adds r0, r4, r9
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #424
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #468]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #448
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r4, r5, r6, r8, r11}
+ adc r9, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #376
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #420]
+ add r11, sp, #376
+ ldr r6, [sp, #400]
+ ldr r4, [sp, #396]
+ ldr r8, [sp, #392]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #404]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r5, [sp, #388]
+ ldr r2, [r0, #32]
+ add r0, sp, #328
+ bl .LmulPv352x32(PLT)
+ adds r0, r7, r9
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #332
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #372]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #356
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r5, r6, r9, r11}
+ ldr r4, [sp, #328]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r8, [sp, #64] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adds r4, r8, r4
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r4, r0
+ add r0, sp, #280
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #324]
+ add r11, sp, #280
+ ldr r7, [sp, #304]
+ ldr r5, [sp, #300]
+ ldr r8, [sp, #296]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #320]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #316]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #312]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #308]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r6, [sp, #292]
+ ldr r2, [r0, #36]
+ add r0, sp, #232
+ bl .LmulPv352x32(PLT)
+ adds r0, r4, r9
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #232
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #276]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #256
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldm r11, {r4, r5, r6, r8, r11}
+ adc r9, r0, r1
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #32] @ 4-byte Spill
+ adcs r0, r9, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #184
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #228]
+ add r11, sp, #184
+ ldr r6, [sp, #208]
+ ldr r4, [sp, #204]
+ ldr r8, [sp, #200]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #224]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #220]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #216]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #212]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r5, [sp, #196]
+ ldr r2, [r0, #40]
+ add r0, sp, #136
+ bl .LmulPv352x32(PLT)
+ adds r0, r7, r9
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ ldr lr, [sp, #140]
+ add r9, sp, #172
+ add r12, sp, #152
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ adcs r11, r1, r11
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r10, r1, r5
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r4
+ ldr r4, [sp, #148]
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #144]
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adc r1, r1, r2
+ ldr r2, [sp, #136]
+ str r1, [sp, #44] @ 4-byte Spill
+ adds r5, r0, r2
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r11, r11, lr
+ adcs r6, r10, r6
+ mul r1, r5, r0
+ str r1, [sp, #40] @ 4-byte Spill
+ ldm r9, {r7, r8, r9}
+ ldm r12, {r0, r1, r2, r3, r12}
+ str r6, [sp, #32] @ 4-byte Spill
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adcs r10, r6, r4
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #88
+ adc r9, r9, #0
+ bl .LmulPv352x32(PLT)
+ add r7, sp, #88
+ ldm r7, {r0, r1, r3, r7}
+ adds r0, r5, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r8, r11, r1
+ str r8, [sp, #28] @ 4-byte Spill
+ adcs r6, r0, r3
+ ldr r3, [sp, #104]
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r2, r10, r7
+ str r6, [sp, #44] @ 4-byte Spill
+ str r2, [sp, #48] @ 4-byte Spill
+ adcs r7, r0, r3
+ ldr r3, [sp, #108]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r7, [sp, #52] @ 4-byte Spill
+ adcs r0, r0, r3
+ ldr r3, [sp, #112]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r5, r0, r3
+ ldr r3, [sp, #116]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r5, [sp, #56] @ 4-byte Spill
+ adcs lr, r0, r3
+ ldr r3, [sp, #120]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str lr, [sp, #60] @ 4-byte Spill
+ adcs r0, r0, r3
+ ldr r3, [sp, #124]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ ldr r3, [sp, #128]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r10, r0, r3
+ ldr r3, [sp, #132]
+ str r10, [sp, #64] @ 4-byte Spill
+ adc r12, r9, r3
+ mov r3, r4
+ str r12, [sp, #40] @ 4-byte Spill
+ ldmib r3, {r0, r1, r9}
+ ldr r4, [r3, #16]
+ ldr r11, [r3]
+ str r4, [sp, #20] @ 4-byte Spill
+ ldr r4, [r3, #20]
+ subs r11, r8, r11
+ ldr r8, [r3, #36]
+ sbcs r0, r6, r0
+ sbcs r1, r2, r1
+ sbcs r2, r7, r9
+ ldr r9, [r3, #32]
+ ldr r7, [sp, #80] @ 4-byte Reload
+ str r4, [sp, #24] @ 4-byte Spill
+ ldr r4, [r3, #24]
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [r3, #28]
+ ldr r3, [r3, #40]
+ str r4, [sp, #36] @ 4-byte Spill
+ str r3, [sp, #84] @ 4-byte Spill
+ ldr r3, [sp, #72] @ 4-byte Reload
+ ldr r4, [sp, #20] @ 4-byte Reload
+ ldr r6, [sp, #36] @ 4-byte Reload
+ sbcs r3, r3, r4
+ ldr r4, [sp, #24] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #32] @ 4-byte Reload
+ sbcs r5, lr, r5
+ sbcs lr, r7, r6
+ ldr r7, [sp, #76] @ 4-byte Reload
+ ldr r6, [sp, #84] @ 4-byte Reload
+ sbcs r9, r7, r9
+ ldr r7, [sp, #28] @ 4-byte Reload
+ sbcs r10, r10, r8
+ ldr r8, [sp, #68] @ 4-byte Reload
+ sbc r12, r12, r6
+ asr r6, r12, #31
+ cmp r6, #0
+ movlt r11, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r11, [r8]
+ movlt r0, r7
+ str r0, [r8, #4]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ movlt r1, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ cmp r6, #0
+ str r1, [r8, #8]
+ movlt r2, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r2, [r8, #12]
+ movlt r3, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r3, [r8, #16]
+ movlt r4, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ cmp r6, #0
+ str r4, [r8, #20]
+ movlt r5, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r5, [r8, #24]
+ movlt lr, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str lr, [r8, #28]
+ movlt r9, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ cmp r6, #0
+ movlt r10, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ movlt r12, r0
+ add r0, r8, #32
+ stm r0, {r9, r10, r12}
+ add sp, sp, #124
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end163:
+ .size mcl_fp_montNF11L, .Lfunc_end163-mcl_fp_montNF11L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed11L
+ .align 2
+ .type mcl_fp_montRed11L,%function
+mcl_fp_montRed11L: @ @mcl_fp_montRed11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #676
+ sub sp, sp, #676
+ mov r10, r2
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r5, [r1]
+ ldr r0, [r10]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [r10, #4]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [r10, #8]
+ str r2, [sp, #56] @ 4-byte Spill
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [r10, #12]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [r10, #16]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [r10, #20]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [r10, #24]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [r10, #-4]
+ str r0, [sp, #140] @ 4-byte Spill
+ mul r2, r5, r0
+ ldr r0, [r10, #28]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r10, #32]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r10, #36]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r10, #40]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r1, #72]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #76]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r1, #80]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #84]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #20]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ mov r1, r10
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #624
+ bl .LmulPv352x32(PLT)
+ add r11, sp, #656
+ add lr, sp, #624
+ ldm r11, {r4, r8, r9, r11}
+ ldr r7, [sp, #652]
+ ldr r6, [sp, #648]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r5, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r5, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ mov r1, r10
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ mul r2, r5, r0
+ add r0, sp, #576
+ bl .LmulPv352x32(PLT)
+ ldr r4, [sp, #576]
+ add r9, sp, #584
+ ldr r12, [sp, #620]
+ ldr lr, [sp, #616]
+ ldr r2, [sp, #612]
+ ldr r3, [sp, #608]
+ ldr r11, [sp, #604]
+ ldr r7, [sp, #600]
+ ldr r6, [sp, #580]
+ ldm r9, {r0, r1, r8, r9}
+ adds r4, r5, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r5, r4, r6
+ ldr r4, [sp, #60] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ mov r9, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r5, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #528
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #572]
+ add r11, sp, #560
+ add lr, sp, #528
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r8, r11}
+ ldr r6, [sp, #556]
+ ldr r7, [sp, #552]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r9, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ mov r5, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r4
+ mov r1, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #480
+ bl .LmulPv352x32(PLT)
+ ldr r4, [sp, #480]
+ add r9, sp, #488
+ ldr r12, [sp, #524]
+ ldr lr, [sp, #520]
+ ldr r2, [sp, #516]
+ ldr r3, [sp, #512]
+ ldr r11, [sp, #508]
+ ldr r7, [sp, #504]
+ ldr r6, [sp, #484]
+ ldm r9, {r0, r1, r8, r9}
+ adds r4, r5, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r5, r4, r6
+ ldr r4, [sp, #60] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r5, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #432
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #476]
+ add r11, sp, #460
+ add lr, sp, #432
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r11, {r6, r8, r9, r11}
+ ldr r7, [sp, #456]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r5, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r4
+ mov r4, r1
+ mov r1, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #384
+ bl .LmulPv352x32(PLT)
+ ldr r6, [sp, #384]
+ add r9, sp, #392
+ ldr r12, [sp, #428]
+ ldr lr, [sp, #424]
+ ldr r2, [sp, #420]
+ ldr r3, [sp, #416]
+ ldr r11, [sp, #412]
+ ldr r5, [sp, #408]
+ ldr r7, [sp, #388]
+ ldm r9, {r0, r1, r8, r9}
+ adds r4, r4, r6
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r6, r4, r7
+ ldr r4, [sp, #60] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ mov r5, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r6, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #336
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #380]
+ add r11, sp, #364
+ add lr, sp, #336
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r6, r8, r9, r11}
+ ldr r7, [sp, #360]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r5, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r4
+ mov r4, r1
+ mov r1, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #288
+ bl .LmulPv352x32(PLT)
+ ldr r6, [sp, #288]
+ add r9, sp, #296
+ ldr r12, [sp, #332]
+ ldr lr, [sp, #328]
+ ldr r2, [sp, #324]
+ ldr r3, [sp, #320]
+ ldr r11, [sp, #316]
+ ldr r5, [sp, #312]
+ ldr r7, [sp, #292]
+ ldm r9, {r0, r1, r8, r9}
+ adds r4, r4, r6
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r6, r4, r7
+ ldr r4, [sp, #60] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ mov r5, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r6, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #240
+ bl .LmulPv352x32(PLT)
+ ldr r0, [sp, #284]
+ add r11, sp, #264
+ add lr, sp, #240
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r6, r7, r8, r9, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r5, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r5, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r5, r4
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r11, r0, r11
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #192
+ bl .LmulPv352x32(PLT)
+ add r6, sp, #192
+ add r7, sp, #208
+ ldm r6, {r0, r1, r3, r6}
+ ldr r12, [sp, #236]
+ ldr lr, [sp, #232]
+ adds r0, r5, r0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #16] @ 4-byte Reload
+ mul r2, r8, r4
+ adcs r0, r0, r3
+ ldr r3, [sp, #228]
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #224]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldm r7, {r0, r1, r4, r7}
+ ldr r5, [sp, #88] @ 4-byte Reload
+ adcs r9, r5, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r4, r0, r4
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r5, r0, r6
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r11, r11, r3
+ adcs r0, r0, lr
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r6, r0, #0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ add r0, sp, #144
+ bl .LmulPv352x32(PLT)
+ add r3, sp, #144
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r8, r0
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r12, r0, r1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r1, [sp, #160]
+ str r12, [sp, #44] @ 4-byte Spill
+ adcs r2, r0, r2
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r3, r9, r3
+ str r2, [sp, #52] @ 4-byte Spill
+ str r3, [sp, #56] @ 4-byte Spill
+ adcs r7, r0, r1
+ ldr r1, [sp, #164]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r7, [sp, #60] @ 4-byte Spill
+ adcs r8, r4, r1
+ ldr r1, [sp, #168]
+ str r8, [sp, #64] @ 4-byte Spill
+ adcs r4, r0, r1
+ ldr r1, [sp, #172]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r4, [sp, #68] @ 4-byte Spill
+ adcs r5, r5, r1
+ ldr r1, [sp, #176]
+ str r5, [sp, #72] @ 4-byte Spill
+ adcs r11, r11, r1
+ ldr r1, [sp, #180]
+ str r11, [sp, #76] @ 4-byte Spill
+ adcs r9, r0, r1
+ ldr r1, [sp, #184]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r9, [sp, #84] @ 4-byte Spill
+ adcs lr, r0, r1
+ ldr r1, [sp, #188]
+ str lr, [sp, #88] @ 4-byte Spill
+ adcs r0, r6, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r6, [sp, #140] @ 4-byte Reload
+ adc r10, r0, #0
+ ldr r0, [sp, #132] @ 4-byte Reload
+ subs r0, r12, r0
+ sbcs r1, r2, r1
+ ldr r2, [sp, #124] @ 4-byte Reload
+ sbcs r2, r3, r2
+ ldr r3, [sp, #108] @ 4-byte Reload
+ sbcs r3, r7, r3
+ ldr r7, [sp, #112] @ 4-byte Reload
+ sbcs r12, r8, r7
+ ldr r7, [sp, #116] @ 4-byte Reload
+ sbcs r8, r4, r7
+ ldr r4, [sp, #120] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #92] @ 4-byte Reload
+ sbcs r5, r11, r5
+ sbcs r11, r9, r7
+ ldr r7, [sp, #100] @ 4-byte Reload
+ sbcs r9, lr, r7
+ ldr r7, [sp, #104] @ 4-byte Reload
+ sbcs lr, r6, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ sbc r6, r10, #0
+ ldr r10, [sp, #136] @ 4-byte Reload
+ ands r6, r6, #1
+ movne r0, r7
+ str r0, [r10]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r1, [r10, #4]
+ movne r2, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ cmp r6, #0
+ str r2, [r10, #8]
+ movne r3, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r3, [r10, #12]
+ movne r12, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r12, [r10, #16]
+ movne r8, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ cmp r6, #0
+ str r8, [r10, #20]
+ movne r4, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r4, [r10, #24]
+ movne r5, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r5, [r10, #28]
+ movne r11, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ cmp r6, #0
+ str r11, [r10, #32]
+ movne r9, r0
+ ldr r0, [sp, #140] @ 4-byte Reload
+ str r9, [r10, #36]
+ movne lr, r0
+ str lr, [r10, #40]
+ add sp, sp, #676
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end164:
+ .size mcl_fp_montRed11L, .Lfunc_end164-mcl_fp_montRed11L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre11L
+ .align 2
+ .type mcl_fp_addPre11L,%function
+mcl_fp_addPre11L: @ @mcl_fp_addPre11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #20
+ sub sp, sp, #20
+ ldm r1, {r3, r12}
+ ldr r8, [r1, #8]
+ ldr r9, [r1, #12]
+ ldmib r2, {r5, r6, r7, r10}
+ ldr r4, [r2, #20]
+ ldr r11, [r2]
+ str r4, [sp] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ adds lr, r11, r3
+ ldr r3, [r2, #36]
+ ldr r11, [r2, #32]
+ adcs r5, r5, r12
+ add r12, r1, #16
+ adcs r6, r6, r8
+ adcs r7, r7, r9
+ add r9, r1, #32
+ str r4, [sp, #4] @ 4-byte Spill
+ ldr r4, [r2, #28]
+ ldr r2, [r2, #40]
+ str r3, [sp, #8] @ 4-byte Spill
+ str r4, [sp, #16] @ 4-byte Spill
+ str r2, [sp, #12] @ 4-byte Spill
+ ldm r9, {r4, r8, r9}
+ ldm r12, {r1, r2, r3, r12}
+ str lr, [r0]
+ stmib r0, {r5, r6}
+ str r7, [r0, #12]
+ ldr r7, [sp] @ 4-byte Reload
+ adcs r1, r10, r1
+ str r1, [r0, #16]
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r2, r7, r2
+ str r2, [r0, #20]
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r1, r1, r3
+ ldr r3, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r1, r1, r12
+ str r1, [r0, #28]
+ adcs r1, r11, r4
+ add r0, r0, #32
+ adcs r2, r2, r8
+ adcs r3, r3, r9
+ stm r0, {r1, r2, r3}
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #20
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end165:
+ .size mcl_fp_addPre11L, .Lfunc_end165-mcl_fp_addPre11L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subPre11L
+ .align 2
+ .type mcl_fp_subPre11L,%function
+mcl_fp_subPre11L: @ @mcl_fp_subPre11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #20
+ sub sp, sp, #20
+ ldmib r2, {r8, r12, lr}
+ ldr r3, [r2, #16]
+ ldr r7, [r2]
+ ldr r6, [r1]
+ ldr r5, [r1, #4]
+ ldr r4, [r1, #8]
+ ldr r11, [r2, #32]
+ ldr r10, [r2, #40]
+ ldr r9, [r1, #36]
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [r2, #20]
+ subs r6, r6, r7
+ ldr r7, [r2, #36]
+ sbcs r5, r5, r8
+ ldr r8, [r1, #40]
+ sbcs r4, r4, r12
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [r2, #24]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [r1, #12]
+ sbcs r12, r3, lr
+ add lr, r1, #16
+ ldm lr, {r1, r2, r3, lr}
+ str r6, [r0]
+ str r5, [r0, #4]
+ str r4, [r0, #8]
+ ldr r4, [sp, #4] @ 4-byte Reload
+ ldr r6, [sp, #8] @ 4-byte Reload
+ str r12, [r0, #12]
+ sbcs r1, r1, r4
+ str r1, [r0, #16]
+ ldr r1, [sp, #12] @ 4-byte Reload
+ sbcs r2, r2, r6
+ str r2, [r0, #20]
+ ldr r2, [sp] @ 4-byte Reload
+ sbcs r1, r3, r1
+ str r1, [r0, #24]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ sbcs r1, lr, r1
+ str r1, [r0, #28]
+ sbcs r1, r7, r11
+ add r0, r0, #32
+ sbcs r2, r9, r2
+ sbcs r3, r8, r10
+ stm r0, {r1, r2, r3}
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #20
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end166:
+ .size mcl_fp_subPre11L, .Lfunc_end166-mcl_fp_subPre11L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_11L
+ .align 2
+ .type mcl_fp_shr1_11L,%function
+mcl_fp_shr1_11L: @ @mcl_fp_shr1_11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ ldmib r1, {r2, r3, r12, lr}
+ add r8, r1, #20
+ add r11, r1, #32
+ ldm r8, {r4, r5, r8}
+ ldr r7, [r1]
+ ldm r11, {r9, r10, r11}
+ lsrs r1, r12, #1
+ lsr r6, r2, #1
+ rrx r1, r3
+ lsrs r2, r2, #1
+ orr r6, r6, r3, lsl #31
+ lsr r3, r11, #1
+ rrx r2, r7
+ stm r0, {r2, r6}
+ str r1, [r0, #8]
+ lsr r1, r12, #1
+ lsr r2, r10, #1
+ orr r1, r1, lr, lsl #31
+ orr r2, r2, r11, lsl #31
+ str r1, [r0, #12]
+ lsrs r1, r4, #1
+ rrx r1, lr
+ str r1, [r0, #16]
+ lsr r1, r4, #1
+ orr r1, r1, r5, lsl #31
+ str r1, [r0, #20]
+ lsrs r1, r8, #1
+ rrx r1, r5
+ str r1, [r0, #24]
+ lsr r1, r8, #1
+ orr r1, r1, r9, lsl #31
+ str r1, [r0, #28]
+ lsrs r1, r10, #1
+ add r0, r0, #32
+ rrx r1, r9
+ stm r0, {r1, r2, r3}
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end167:
+ .size mcl_fp_shr1_11L, .Lfunc_end167-mcl_fp_shr1_11L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add11L
+ .align 2
+ .type mcl_fp_add11L,%function
+mcl_fp_add11L: @ @mcl_fp_add11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #32
+ sub sp, sp, #32
+ ldm r1, {r12, lr}
+ ldr r5, [r2]
+ ldr r8, [r1, #8]
+ ldr r9, [r1, #12]
+ ldmib r2, {r4, r6, r7}
+ adds r5, r5, r12
+ ldr r12, [r1, #32]
+ adcs r4, r4, lr
+ str r5, [sp, #28] @ 4-byte Spill
+ ldr r5, [r1, #24]
+ ldr lr, [r1, #40]
+ adcs r6, r6, r8
+ str r4, [sp, #24] @ 4-byte Spill
+ ldr r4, [r1, #20]
+ adcs r7, r7, r9
+ str r6, [sp, #12] @ 4-byte Spill
+ ldr r6, [r1, #16]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r2, #16]
+ adcs r9, r7, r6
+ ldr r7, [r2, #20]
+ str r9, [sp] @ 4-byte Spill
+ adcs r7, r7, r4
+ ldr r4, [r2, #24]
+ str r7, [sp, #4] @ 4-byte Spill
+ adcs r8, r4, r5
+ ldr r4, [r1, #28]
+ ldr r5, [r2, #28]
+ adcs r6, r5, r4
+ ldr r5, [r2, #32]
+ ldr r4, [r1, #36]
+ ldr r1, [r2, #36]
+ ldr r2, [r2, #40]
+ adcs r10, r5, r12
+ ldr r12, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r4
+ ldr r4, [sp, #8] @ 4-byte Reload
+ adcs r11, r2, lr
+ ldr r2, [sp, #28] @ 4-byte Reload
+ ldr lr, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ str r2, [r0]
+ str r12, [r0, #4]
+ str lr, [r0, #8]
+ str r4, [r0, #12]
+ str r9, [r0, #16]
+ str r7, [r0, #20]
+ str r8, [r0, #24]
+ str r6, [r0, #28]
+ str r10, [r0, #32]
+ str r1, [r0, #36]
+ mov r1, #0
+ str r11, [r0, #40]
+ mov r9, r6
+ adc r1, r1, #0
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r3, {r1, r7}
+ ldr r5, [r3, #8]
+ ldr r6, [r3, #12]
+ subs r1, r2, r1
+ ldr r2, [sp] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ sbcs r1, r12, r7
+ str r1, [sp, #24] @ 4-byte Spill
+ sbcs r1, lr, r5
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ sbcs r5, r4, r6
+ sbcs r7, r2, r1
+ ldr r1, [r3, #20]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ sbcs r4, r2, r1
+ ldr r1, [r3, #24]
+ sbcs r12, r8, r1
+ ldr r1, [r3, #28]
+ add r3, r3, #32
+ sbcs lr, r9, r1
+ ldm r3, {r1, r2, r3}
+ ldr r6, [sp, #20] @ 4-byte Reload
+ sbcs r1, r10, r1
+ sbcs r2, r6, r2
+ ldr r6, [sp, #16] @ 4-byte Reload
+ sbcs r3, r11, r3
+ sbc r6, r6, #0
+ tst r6, #1
+ bne .LBB168_2
+@ BB#1: @ %nocarry
+ ldr r6, [sp, #28] @ 4-byte Reload
+ str r6, [r0]
+ ldr r6, [sp, #24] @ 4-byte Reload
+ str r6, [r0, #4]
+ ldr r6, [sp, #12] @ 4-byte Reload
+ str r6, [r0, #8]
+ str r5, [r0, #12]
+ str r7, [r0, #16]
+ str r4, [r0, #20]
+ str r12, [r0, #24]
+ str lr, [r0, #28]
+ add r0, r0, #32
+ stm r0, {r1, r2, r3}
+.LBB168_2: @ %carry
+ add sp, sp, #32
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end168:
+ .size mcl_fp_add11L, .Lfunc_end168-mcl_fp_add11L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF11L
+ .align 2
+ .type mcl_fp_addNF11L,%function
+mcl_fp_addNF11L: @ @mcl_fp_addNF11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #56
+ sub sp, sp, #56
+ ldm r1, {r5, r8, lr}
+ ldr r6, [r2]
+ ldr r12, [r1, #12]
+ ldmib r2, {r4, r7, r9}
+ ldr r11, [r1, #24]
+ adds r10, r6, r5
+ adcs r4, r4, r8
+ ldr r8, [r1, #20]
+ adcs r7, r7, lr
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [r2, #16]
+ ldr lr, [r1, #36]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ adcs r6, r9, r12
+ ldr r12, [r2, #36]
+ str r6, [sp, #16] @ 4-byte Spill
+ adcs r7, r4, r7
+ ldr r4, [r2, #28]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ adcs r7, r7, r8
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ adcs r8, r7, r11
+ ldr r7, [r1, #28]
+ ldr r11, [r1, #40]
+ str r8, [sp, #20] @ 4-byte Spill
+ adcs r7, r4, r7
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ ldr r1, [r2, #32]
+ ldr r2, [r2, #40]
+ adcs r4, r1, r7
+ adcs r1, r12, lr
+ str r4, [sp, #24] @ 4-byte Spill
+ str r1, [sp, #48] @ 4-byte Spill
+ adc r9, r2, r11
+ ldmib r3, {r1, r2, lr}
+ ldr r5, [r3, #20]
+ ldr r11, [r3]
+ ldr r7, [r3, #16]
+ ldr r12, [r3, #24]
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [r3, #28]
+ subs r11, r10, r11
+ str r5, [sp, #28] @ 4-byte Spill
+ ldr r5, [sp, #32] @ 4-byte Reload
+ sbcs r1, r5, r1
+ ldr r5, [sp, #40] @ 4-byte Reload
+ sbcs r2, r5, r2
+ ldr r5, [r3, #32]
+ sbcs lr, r6, lr
+ ldr r6, [sp, #36] @ 4-byte Reload
+ str r5, [sp, #8] @ 4-byte Spill
+ ldr r5, [r3, #36]
+ ldr r3, [r3, #40]
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [sp, #44] @ 4-byte Reload
+ str r5, [sp] @ 4-byte Spill
+ ldr r5, [sp, #12] @ 4-byte Reload
+ sbcs r7, r3, r7
+ ldr r3, [sp, #52] @ 4-byte Reload
+ sbcs r3, r3, r5
+ ldr r5, [sp, #28] @ 4-byte Reload
+ sbcs r12, r8, r12
+ sbcs r8, r6, r5
+ ldr r5, [sp, #8] @ 4-byte Reload
+ sbcs r4, r4, r5
+ ldr r5, [sp] @ 4-byte Reload
+ str r4, [sp, #12] @ 4-byte Spill
+ ldr r4, [sp, #48] @ 4-byte Reload
+ sbcs r4, r4, r5
+ ldr r5, [sp, #32] @ 4-byte Reload
+ str r4, [sp, #28] @ 4-byte Spill
+ ldr r4, [sp, #4] @ 4-byte Reload
+ sbc r6, r9, r4
+ asr r4, r6, #31
+ cmp r4, #0
+ movlt r11, r10
+ movlt r1, r5
+ str r11, [r0]
+ str r1, [r0, #4]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ movlt r2, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ cmp r4, #0
+ str r2, [r0, #8]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ movlt lr, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str lr, [r0, #12]
+ movlt r7, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r7, [r0, #16]
+ movlt r3, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ cmp r4, #0
+ str r3, [r0, #20]
+ ldr r3, [sp, #12] @ 4-byte Reload
+ movlt r12, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r12, [r0, #24]
+ movlt r8, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r8, [r0, #28]
+ movlt r3, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ cmp r4, #0
+ movlt r6, r9
+ str r3, [r0, #32]
+ movlt r2, r1
+ str r2, [r0, #36]
+ str r6, [r0, #40]
+ add sp, sp, #56
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end169:
+ .size mcl_fp_addNF11L, .Lfunc_end169-mcl_fp_addNF11L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sub11L
+ .align 2
+ .type mcl_fp_sub11L,%function
+mcl_fp_sub11L: @ @mcl_fp_sub11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #48
+ sub sp, sp, #48
+ mov r10, r3
+ ldr r12, [r2]
+ ldr r9, [r2, #4]
+ ldr r8, [r2, #8]
+ ldr r3, [r2, #12]
+ ldm r1, {r4, r5, r6, r7}
+ subs r4, r4, r12
+ sbcs r5, r5, r9
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ sbcs r6, r6, r8
+ str r5, [sp, #44] @ 4-byte Spill
+ ldr r5, [r2, #20]
+ add r8, r1, #32
+ sbcs r12, r7, r3
+ str r6, [sp, #40] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ ldr r7, [r1, #16]
+ ldr r3, [sp, #36] @ 4-byte Reload
+ str r12, [sp, #24] @ 4-byte Spill
+ sbcs r11, r7, r6
+ ldr r6, [r1, #20]
+ ldr r7, [r2, #40]
+ sbcs r9, r6, r5
+ ldr r5, [r1, #24]
+ sbcs r6, r5, r4
+ ldr r4, [r2, #28]
+ ldr r5, [r1, #28]
+ str r6, [sp, #28] @ 4-byte Spill
+ sbcs lr, r5, r4
+ ldr r4, [r2, #36]
+ ldr r5, [r2, #32]
+ str lr, [sp, #20] @ 4-byte Spill
+ str r4, [sp, #32] @ 4-byte Spill
+ ldm r8, {r2, r4, r8}
+ str r3, [r0]
+ sbcs r1, r2, r5
+ ldr r2, [sp, #32] @ 4-byte Reload
+ sbcs r2, r4, r2
+ mov r4, r3
+ ldr r3, [sp, #44] @ 4-byte Reload
+ sbcs r8, r8, r7
+ mov r7, #0
+ sbc r7, r7, #0
+ tst r7, #1
+ str r3, [r0, #4]
+ ldr r3, [sp, #40] @ 4-byte Reload
+ str r3, [r0, #8]
+ add r3, r0, #32
+ str r12, [r0, #12]
+ str r11, [r0, #16]
+ str r9, [r0, #20]
+ str r6, [r0, #24]
+ str lr, [r0, #28]
+ stm r3, {r1, r2, r8}
+ beq .LBB170_2
+@ BB#1: @ %carry
+ ldr r3, [r10, #32]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [r10, #36]
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [r10, #40]
+ str r3, [sp, #32] @ 4-byte Spill
+ ldmib r10, {r5, lr}
+ ldr r3, [r10, #20]
+ ldr r6, [sp, #44] @ 4-byte Reload
+ ldr r7, [r10, #12]
+ ldr r12, [r10, #16]
+ str r3, [sp] @ 4-byte Spill
+ ldr r3, [r10, #24]
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [r10, #28]
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [r10]
+ adds r3, r3, r4
+ ldr r4, [sp, #40] @ 4-byte Reload
+ adcs r5, r5, r6
+ stm r0, {r3, r5}
+ ldr r3, [sp, #24] @ 4-byte Reload
+ adcs r4, lr, r4
+ str r4, [r0, #8]
+ adcs r3, r7, r3
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r3, [r0, #12]
+ adcs r3, r12, r11
+ str r3, [r0, #16]
+ ldr r3, [sp] @ 4-byte Reload
+ adcs r3, r3, r9
+ str r3, [r0, #20]
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r3, r7, r3
+ ldr r7, [sp, #8] @ 4-byte Reload
+ str r3, [r0, #24]
+ ldr r3, [sp, #20] @ 4-byte Reload
+ adcs r3, r7, r3
+ str r3, [r0, #28]
+ ldr r3, [sp, #12] @ 4-byte Reload
+ add r0, r0, #32
+ adcs r1, r3, r1
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adcs r2, r3, r2
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adc r3, r3, r8
+ stm r0, {r1, r2, r3}
+.LBB170_2: @ %nocarry
+ add sp, sp, #48
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end170:
+ .size mcl_fp_sub11L, .Lfunc_end170-mcl_fp_sub11L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF11L
+ .align 2
+ .type mcl_fp_subNF11L,%function
+mcl_fp_subNF11L: @ @mcl_fp_subNF11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #68
+ sub sp, sp, #68
+ mov r12, r0
+ ldr r0, [r2, #32]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r2, #36]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r2, #40]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldm r2, {r8, r10}
+ ldr r0, [r2, #8]
+ ldr r5, [r2, #16]
+ ldr r11, [r2, #20]
+ ldr lr, [r1, #16]
+ ldr r6, [r1, #20]
+ ldr r9, [r1, #24]
+ ldr r7, [r1, #28]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r2, #12]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r2, #24]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r2, #28]
+ ldr r2, [r1, #8]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r1, #12]
+ ldm r1, {r1, r4}
+ subs r1, r1, r8
+ sbcs r8, r4, r10
+ ldr r4, [sp, #32] @ 4-byte Reload
+ str r8, [sp, #16] @ 4-byte Spill
+ sbcs r2, r2, r4
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ sbcs r4, r0, r2
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r2, [sp, #36] @ 4-byte Reload
+ sbcs r5, lr, r5
+ ldr lr, [r3, #12]
+ str r4, [sp, #20] @ 4-byte Spill
+ sbcs r11, r6, r11
+ mov r6, r1
+ str r5, [sp, #28] @ 4-byte Spill
+ str r11, [sp, #32] @ 4-byte Spill
+ sbcs r0, r9, r0
+ ldr r9, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r7, r0
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ sbcs r0, r2, r0
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ sbcs r10, r2, r0
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r10, [sp, #48] @ 4-byte Spill
+ sbc r0, r7, r2
+ ldr r2, [r3, #36]
+ ldr r7, [r3, #4]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r3, #40]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r3, #8]
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [r3, #20]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [r3, #24]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [r3, #28]
+ ldr r3, [r3]
+ adds r1, r6, r3
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp] @ 4-byte Reload
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adcs r7, r8, r7
+ adcs r2, r9, r2
+ adcs lr, r4, lr
+ adcs r4, r5, r0
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r5, r11, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r8, r0, r3
+ ldr r3, [sp, #64] @ 4-byte Reload
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r11, r3, r0
+ ldr r3, [sp, #60] @ 4-byte Reload
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r3, r3, r0
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [sp, #44] @ 4-byte Reload
+ adcs r0, r10, r3
+ ldr r3, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r10, r0, r3
+ asr r3, r0, #31
+ ldr r0, [sp, #16] @ 4-byte Reload
+ cmp r3, #0
+ movge r1, r6
+ movge r2, r9
+ str r1, [r12]
+ ldr r1, [sp, #60] @ 4-byte Reload
+ movge r7, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ cmp r3, #0
+ str r7, [r12, #4]
+ str r2, [r12, #8]
+ ldr r2, [sp, #48] @ 4-byte Reload
+ movge lr, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ str lr, [r12, #12]
+ movge r4, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ str r4, [r12, #16]
+ movge r5, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ cmp r3, #0
+ str r5, [r12, #20]
+ movge r8, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r8, [r12, #24]
+ movge r11, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ movge r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ cmp r3, #0
+ str r11, [r12, #28]
+ movge r1, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ movge r10, r2
+ add r2, r12, #32
+ stm r2, {r0, r1, r10}
+ add sp, sp, #68
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end171:
+ .size mcl_fp_subNF11L, .Lfunc_end171-mcl_fp_subNF11L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add11L
+ .align 2
+ .type mcl_fpDbl_add11L,%function
+mcl_fpDbl_add11L: @ @mcl_fpDbl_add11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #120
+ sub sp, sp, #120
+ ldm r1, {r7, r12, lr}
+ ldr r8, [r1, #12]
+ ldm r2, {r4, r5, r6, r9}
+ ldr r10, [r2, #20]
+ adds r4, r4, r7
+ adcs r7, r5, r12
+ str r4, [sp, #40] @ 4-byte Spill
+ ldr r4, [r2, #64]
+ str r7, [sp, #28] @ 4-byte Spill
+ adcs r7, r6, lr
+ add lr, r1, #16
+ str r7, [sp, #24] @ 4-byte Spill
+ adcs r7, r9, r8
+ add r8, r1, #32
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r4, [sp, #108] @ 4-byte Spill
+ ldr r4, [r2, #68]
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r4, [sp, #104] @ 4-byte Spill
+ ldr r4, [r2, #72]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r4, [sp, #96] @ 4-byte Spill
+ ldr r4, [r2, #76]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r4, [sp, #116] @ 4-byte Spill
+ ldr r4, [r2, #80]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r4, [sp, #100] @ 4-byte Spill
+ ldr r4, [r2, #84]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r4, [sp, #112] @ 4-byte Spill
+ ldr r4, [r2, #16]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ ldr r2, [r1, #64]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r7, [sp, #16] @ 4-byte Spill
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldm r8, {r5, r6, r8}
+ ldr r2, [r1, #44]
+ ldr r11, [r1, #52]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r9, [sp, #40] @ 4-byte Reload
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r1, r4, r1
+ str r9, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #24] @ 4-byte Reload
+ ldr r4, [sp, #32] @ 4-byte Reload
+ adcs r2, r10, r2
+ add r10, r3, #32
+ str r7, [r0, #8]
+ str r4, [r0, #12]
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #20] @ 4-byte Reload
+ ldr r7, [sp] @ 4-byte Reload
+ adcs r1, r1, r12
+ str r1, [r0, #24]
+ adcs r2, r2, lr
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r2, [r0, #28]
+ ldr r2, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [r0, #32]
+ adcs r2, r2, r6
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r2, [r0, #36]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r8
+ adcs r6, r2, r7
+ str r1, [r0, #40]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ ldr r7, [sp, #8] @ 4-byte Reload
+ str r6, [sp, #72] @ 4-byte Spill
+ adcs r4, r1, r2
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r4, [sp, #76] @ 4-byte Spill
+ adcs r2, r1, r11
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r2, [sp, #80] @ 4-byte Spill
+ adcs r5, r1, r7
+ ldr r1, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r5, [sp, #92] @ 4-byte Spill
+ adcs r8, r1, r7
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r8, [sp, #84] @ 4-byte Spill
+ adcs r1, r1, r7
+ ldr r7, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r7
+ ldr r7, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r12, r1, r7
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r7, [sp, #56] @ 4-byte Reload
+ str r12, [sp, #96] @ 4-byte Spill
+ adcs r1, r1, r7
+ ldr r7, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r7
+ ldr r7, [sp, #64] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #112] @ 4-byte Spill
+ mov r1, #0
+ adc r1, r1, #0
+ str r1, [sp, #88] @ 4-byte Spill
+ ldmib r3, {r1, r9, lr}
+ ldr r7, [r3, #16]
+ ldr r11, [r3]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r3, #20]
+ subs r11, r6, r11
+ sbcs r1, r4, r1
+ sbcs r4, r2, r9
+ sbcs r2, r5, lr
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r3, #24]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r3, #28]
+ str r7, [sp, #68] @ 4-byte Spill
+ ldm r10, {r5, r9, r10}
+ ldr r3, [sp, #56] @ 4-byte Reload
+ ldr r6, [sp, #60] @ 4-byte Reload
+ sbcs r7, r8, r3
+ ldr r3, [sp, #108] @ 4-byte Reload
+ sbcs r8, r3, r6
+ ldr r3, [sp, #104] @ 4-byte Reload
+ ldr r6, [sp, #64] @ 4-byte Reload
+ sbcs r3, r3, r6
+ ldr r6, [sp, #68] @ 4-byte Reload
+ sbcs r12, r12, r6
+ ldr r6, [sp, #116] @ 4-byte Reload
+ sbcs lr, r6, r5
+ ldr r5, [sp, #100] @ 4-byte Reload
+ ldr r6, [sp, #112] @ 4-byte Reload
+ sbcs r9, r5, r9
+ ldr r5, [sp, #72] @ 4-byte Reload
+ sbcs r10, r6, r10
+ ldr r6, [sp, #88] @ 4-byte Reload
+ sbc r6, r6, #0
+ ands r6, r6, #1
+ movne r11, r5
+ ldr r5, [sp, #76] @ 4-byte Reload
+ str r11, [r0, #44]
+ movne r1, r5
+ str r1, [r0, #48]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ movne r4, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ cmp r6, #0
+ str r4, [r0, #52]
+ movne r2, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r2, [r0, #56]
+ movne r7, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r7, [r0, #60]
+ movne r8, r1
+ ldr r1, [sp, #104] @ 4-byte Reload
+ cmp r6, #0
+ str r8, [r0, #64]
+ movne r3, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r3, [r0, #68]
+ movne r12, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r12, [r0, #72]
+ movne lr, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ cmp r6, #0
+ str lr, [r0, #76]
+ movne r9, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r9, [r0, #80]
+ movne r10, r1
+ str r10, [r0, #84]
+ add sp, sp, #120
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end172:
+ .size mcl_fpDbl_add11L, .Lfunc_end172-mcl_fpDbl_add11L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub11L
+ .align 2
+ .type mcl_fpDbl_sub11L,%function
+mcl_fpDbl_sub11L: @ @mcl_fpDbl_sub11L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #120
+ sub sp, sp, #120
+ ldr r7, [r2, #64]
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #100] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #104] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #112] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #108] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #116] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r2]
+ ldmib r2, {r4, r8, r10}
+ ldm r1, {r5, r6, r12, lr}
+ ldr r9, [r2, #20]
+ subs r5, r5, r7
+ ldr r7, [r2, #24]
+ sbcs r4, r6, r4
+ str r5, [sp, #16] @ 4-byte Spill
+ ldr r5, [r2, #32]
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [r2, #28]
+ sbcs r8, r12, r8
+ str r7, [sp, #32] @ 4-byte Spill
+ sbcs r7, lr, r10
+ add r10, r1, #32
+ add lr, r1, #16
+ str r5, [sp, #40] @ 4-byte Spill
+ str r7, [sp] @ 4-byte Spill
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r2, #16]
+ ldr r2, [r1, #64]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldm r10, {r5, r6, r10}
+ ldr r2, [r1, #44]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #16] @ 4-byte Reload
+ ldr r7, [sp, #8] @ 4-byte Reload
+ str r11, [r0]
+ stmib r0, {r7, r8}
+ sbcs r1, r1, r4
+ mov r8, #0
+ ldr r4, [sp] @ 4-byte Reload
+ sbcs r2, r2, r9
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r4, [r0, #12]
+ str r1, [r0, #16]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #36] @ 4-byte Reload
+ sbcs r1, r12, r1
+ str r1, [r0, #24]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r2, lr, r2
+ str r2, [r0, #28]
+ ldr r2, [sp, #68] @ 4-byte Reload
+ sbcs r1, r5, r1
+ str r1, [r0, #32]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ sbcs r2, r6, r2
+ str r2, [r0, #36]
+ ldr r2, [sp, #12] @ 4-byte Reload
+ sbcs r1, r10, r1
+ str r1, [r0, #40]
+ ldr r1, [sp, #76] @ 4-byte Reload
+ sbcs r4, r2, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r4, [sp, #40] @ 4-byte Spill
+ sbcs r2, r2, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r2, [sp, #68] @ 4-byte Spill
+ sbcs r9, r7, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ ldr r7, [sp, #24] @ 4-byte Reload
+ sbcs r12, r7, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r12, [sp, #80] @ 4-byte Spill
+ sbcs lr, r7, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str lr, [sp, #84] @ 4-byte Spill
+ sbcs r5, r7, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #48] @ 4-byte Reload
+ str r5, [sp, #96] @ 4-byte Spill
+ sbcs r6, r7, r1
+ ldr r1, [sp, #104] @ 4-byte Reload
+ ldr r7, [sp, #52] @ 4-byte Reload
+ str r6, [sp, #100] @ 4-byte Spill
+ sbcs r11, r7, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ ldr r7, [sp, #56] @ 4-byte Reload
+ str r11, [sp, #104] @ 4-byte Spill
+ sbcs r1, r7, r1
+ ldr r7, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ sbcs r10, r7, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r7, [sp, #64] @ 4-byte Reload
+ str r10, [sp, #108] @ 4-byte Spill
+ sbcs r1, r7, r1
+ ldr r7, [r3, #4]
+ str r1, [sp, #116] @ 4-byte Spill
+ sbc r1, r8, #0
+ ldr r8, [r3, #28]
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [r3, #32]
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [r3, #8]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [r3, #12]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ ldr r3, [r3]
+ str r1, [sp, #64] @ 4-byte Spill
+ adds r1, r4, r3
+ ldr r3, [sp, #48] @ 4-byte Reload
+ ldr r4, [sp, #56] @ 4-byte Reload
+ adcs r7, r2, r7
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r2, r9, r2
+ adcs r3, r12, r3
+ adcs r12, lr, r4
+ ldr r4, [sp, #60] @ 4-byte Reload
+ adcs r4, r5, r4
+ ldr r5, [sp, #64] @ 4-byte Reload
+ adcs lr, r6, r5
+ ldr r6, [sp, #112] @ 4-byte Reload
+ ldr r5, [sp, #72] @ 4-byte Reload
+ adcs r8, r11, r8
+ adcs r11, r6, r5
+ ldr r6, [sp, #76] @ 4-byte Reload
+ ldr r5, [sp, #116] @ 4-byte Reload
+ adcs r10, r10, r6
+ ldr r6, [sp, #88] @ 4-byte Reload
+ adc r6, r5, r6
+ str r6, [sp, #88] @ 4-byte Spill
+ ldr r6, [sp, #92] @ 4-byte Reload
+ ands r5, r6, #1
+ ldr r6, [sp, #40] @ 4-byte Reload
+ moveq r2, r9
+ moveq r1, r6
+ str r1, [r0, #44]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ moveq r7, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ cmp r5, #0
+ str r7, [r0, #48]
+ str r2, [r0, #52]
+ ldr r2, [sp, #88] @ 4-byte Reload
+ moveq r3, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r3, [r0, #56]
+ moveq r12, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r12, [r0, #60]
+ moveq r4, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ cmp r5, #0
+ str r4, [r0, #64]
+ moveq lr, r1
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str lr, [r0, #68]
+ moveq r8, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r8, [r0, #72]
+ moveq r11, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ cmp r5, #0
+ str r11, [r0, #76]
+ moveq r10, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r10, [r0, #80]
+ moveq r2, r1
+ str r2, [r0, #84]
+ add sp, sp, #120
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end173:
+ .size mcl_fpDbl_sub11L, .Lfunc_end173-mcl_fpDbl_sub11L
+ .cantunwind
+ .fnend
+
+ .align 2
+ .type .LmulPv384x32,%function
+.LmulPv384x32: @ @mulPv384x32
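+@ Local helper: multiplies the 12-limb (384-bit) little-endian integer at [r1]
+@ by the 32-bit scalar in r2 and writes the 13-limb product, including the
+@ final carry limb, to [r0] .. [r0, #48].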
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r3, [r1, #28]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #28]
+ ldr r3, [r1, #32]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #32]
+ ldr r3, [r1, #36]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #36]
+ ldr r3, [r1, #40]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #40]
+ ldr r1, [r1, #44]
+ umull r3, r7, r1, r2
+ adcs r1, r6, r3
+ str r1, [r0, #44]
+ adc r1, r7, #0
+ str r1, [r0, #48]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end174:
+ .size .LmulPv384x32, .Lfunc_end174-.LmulPv384x32
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre12L
+ .align 2
+ .type mcl_fp_mulUnitPre12L,%function
+mcl_fp_mulUnitPre12L: @ @mcl_fp_mulUnitPre12L
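+@ z = x * y for a 12-limb x and a single 32-bit limb y (r0 = z, r1 = x, r2 = y);
+@ builds the 13-limb product on the stack via .LmulPv384x32 and copies it out.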
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #60
+ sub sp, sp, #60
+ mov r4, r0
+ mov r0, sp
+ bl .LmulPv384x32(PLT)
+ ldr r12, [sp, #48]
+ ldr lr, [sp, #44]
+ ldr r8, [sp, #40]
+ ldr r9, [sp, #36]
+ ldr r10, [sp, #32]
+ ldr r11, [sp, #28]
+ ldr r5, [sp, #24]
+ ldr r6, [sp, #20]
+ ldm sp, {r2, r3}
+ add r7, sp, #8
+ ldm r7, {r0, r1, r7}
+ stm r4, {r2, r3}
+ add r2, r4, #8
+ stm r2, {r0, r1, r7}
+ str r6, [r4, #20]
+ str r5, [r4, #24]
+ str r11, [r4, #28]
+ str r10, [r4, #32]
+ str r9, [r4, #36]
+ str r8, [r4, #40]
+ str lr, [r4, #44]
+ str r12, [r4, #48]
+ add sp, sp, #60
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end175:
+ .size mcl_fp_mulUnitPre12L, .Lfunc_end175-mcl_fp_mulUnitPre12L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre12L
+ .align 2
+ .type mcl_fpDbl_mulPre12L,%function
+mcl_fpDbl_mulPre12L: @ @mcl_fpDbl_mulPre12L
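+@ Full 24-limb product of two 12-limb operands (r0 = result, r1 = x, r2 = y).
+@ Splits each operand into 6-limb halves and combines three calls to
+@ mcl_fpDbl_mulPre6L (one Karatsuba-style level), then fixes up the middle term.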
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #196
+ sub sp, sp, #196
+ mov r6, r2
+ mov r5, r1
+ mov r4, r0
+ bl mcl_fpDbl_mulPre6L(PLT)
+ add r0, r4, #48
+ add r1, r5, #24
+ add r2, r6, #24
+ bl mcl_fpDbl_mulPre6L(PLT)
+ add lr, r6, #24
+ ldr r8, [r6, #40]
+ ldr r9, [r6, #44]
+ ldr r2, [r6, #16]
+ ldr r3, [r6, #20]
+ ldm lr, {r0, r1, r12, lr}
+ ldm r6, {r6, r7, r10, r11}
+ adds r0, r6, r0
+ adcs r1, r7, r1
+ str r0, [sp, #80] @ 4-byte Spill
+ adcs r12, r10, r12
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r10, [r5, #36]
+ adcs r0, r11, lr
+ add lr, r5, #8
+ str r12, [sp, #68] @ 4-byte Spill
+ str r0, [sp, #92] @ 4-byte Spill
+ adcs r0, r2, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ adcs r0, r3, r9
+ ldr r9, [r5, #32]
+ str r0, [sp, #84] @ 4-byte Spill
+ mov r0, #0
+ adc r6, r0, #0
+ ldr r0, [r5, #40]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r5, #44]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldm lr, {r3, r11, lr}
+ ldr r8, [r5, #20]
+ ldr r0, [r5, #24]
+ ldr r2, [r5, #28]
+ ldm r5, {r5, r7}
+ adds r0, r5, r0
+ ldr r5, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ str r0, [sp, #124]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r7, r7, r2
+ add r2, sp, #100
+ adcs r9, r3, r9
+ str r7, [sp, #128]
+ adcs r11, r11, r10
+ str r9, [sp, #132]
+ str r5, [sp, #100]
+ str r1, [sp, #104]
+ str r12, [sp, #108]
+ add r1, sp, #124
+ str r11, [sp, #136]
+ adcs r10, lr, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r10, [sp, #140]
+ adcs r8, r8, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r8, [sp, #144]
+ str r0, [sp, #112]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #116]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #120]
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ add r0, sp, #148
+ bl mcl_fpDbl_mulPre6L(PLT)
+ cmp r6, #0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r3, [sp, #92] @ 4-byte Reload
+ moveq r8, r6
+ moveq r10, r6
+ moveq r11, r6
+ moveq r9, r6
+ moveq r7, r6
+ cmp r6, #0
+ moveq r0, r6
+ adds r2, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r5, [sp, #88] @ 4-byte Reload
+ adcs r1, r7, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r12, r9, r0
+ adcs r3, r11, r3
+ adcs lr, r10, r5
+ ldr r5, [sp, #84] @ 4-byte Reload
+ adcs r0, r8, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ mov r0, #0
+ adc r5, r0, #0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ cmp r0, #0
+ and r6, r6, r0
+ moveq r1, r7
+ ldr r7, [sp, #96] @ 4-byte Reload
+ moveq r12, r9
+ ldr r9, [sp, #92] @ 4-byte Reload
+ moveq lr, r10
+ moveq r3, r11
+ moveq r2, r7
+ ldr r7, [sp, #172]
+ cmp r0, #0
+ moveq r9, r8
+ moveq r5, r0
+ adds r8, r2, r7
+ ldr r7, [sp, #176]
+ adcs r10, r1, r7
+ ldr r7, [sp, #180]
+ adcs r0, r12, r7
+ ldr r7, [sp, #184]
+ str r0, [sp, #96] @ 4-byte Spill
+ adcs r0, r3, r7
+ ldr r7, [sp, #188]
+ str r0, [sp, #92] @ 4-byte Spill
+ adcs r0, lr, r7
+ ldr r7, [sp, #192]
+ str r0, [sp, #84] @ 4-byte Spill
+ adcs r0, r9, r7
+ ldr r7, [r4]
+ str r0, [sp, #80] @ 4-byte Spill
+ adc r0, r5, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ ldmib r4, {r6, r9, lr}
+ ldr r0, [sp, #148]
+ ldr r5, [sp, #152]
+ ldr r1, [sp, #156]
+ ldr r2, [sp, #160]
+ ldr r11, [r4, #24]
+ subs r3, r0, r7
+ ldr r0, [r4, #16]
+ sbcs r12, r5, r6
+ ldr r5, [r4, #68]
+ sbcs r6, r1, r9
+ ldr r1, [sp, #164]
+ ldr r9, [r4, #32]
+ sbcs r2, r2, lr
+ ldr lr, [r4, #72]
+ str r5, [sp, #56] @ 4-byte Spill
+ sbcs r7, r1, r0
+ ldr r0, [r4, #20]
+ ldr r1, [sp, #168]
+ sbcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ sbcs r0, r8, r11
+ ldr r8, [r4, #28]
+ str r0, [sp, #60] @ 4-byte Spill
+ sbcs r0, r10, r8
+ ldr r10, [r4, #52]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ sbcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r4, #36]
+ str r0, [sp, #96] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r4, #40]
+ str r0, [sp, #88] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r4, #44]
+ str r0, [sp, #92] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [r4, #92]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ sbc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r4, #48]
+ str r0, [sp, #80] @ 4-byte Spill
+ subs r0, r3, r0
+ ldr r3, [r4, #80]
+ str r0, [sp, #24] @ 4-byte Spill
+ sbcs r0, r12, r10
+ ldr r12, [r4, #76]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r4, #56]
+ str r0, [sp, #76] @ 4-byte Spill
+ sbcs r0, r6, r0
+ ldr r6, [r4, #64]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r4, #60]
+ str r6, [sp, #44] @ 4-byte Spill
+ str r0, [sp, #72] @ 4-byte Spill
+ sbcs r0, r2, r0
+ ldr r2, [r4, #84]
+ sbcs r7, r7, r6
+ ldr r6, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r4, #88]
+ str r2, [sp, #68] @ 4-byte Spill
+ sbcs r6, r6, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ sbcs r5, r5, lr
+ str r5, [sp] @ 4-byte Spill
+ ldr r5, [sp, #52] @ 4-byte Reload
+ sbcs r5, r5, r12
+ str r5, [sp, #4] @ 4-byte Spill
+ ldr r5, [sp, #48] @ 4-byte Reload
+ sbcs r5, r5, r3
+ str r5, [sp, #8] @ 4-byte Spill
+ ldr r5, [sp, #40] @ 4-byte Reload
+ sbcs r2, r5, r2
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [sp, #36] @ 4-byte Reload
+ sbcs r2, r2, r0
+ str r2, [sp, #52] @ 4-byte Spill
+ mov r2, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ sbc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adds r11, r11, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ str r11, [r4, #24]
+ adcs r8, r8, r0
+ ldr r0, [sp, #16] @ 4-byte Reload
+ str r8, [r4, #28]
+ adcs r9, r9, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r9, [r4, #32]
+ adcs r5, r0, r1
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r1, [sp] @ 4-byte Reload
+ str r5, [r4, #36]
+ ldr r5, [sp, #8] @ 4-byte Reload
+ adcs r7, r0, r7
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r7, [r4, #40]
+ adcs r6, r0, r6
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r6, [r4, #44]
+ adcs r0, r0, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [r4, #48]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r1, r10, r1
+ adcs r0, r0, r5
+ str r1, [r4, #52]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ ldr r5, [sp, #48] @ 4-byte Reload
+ str r0, [r4, #56]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp, #52] @ 4-byte Reload
+ str r1, [r4, #60]
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [r4, #64]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [r4, #68]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [r4, #72]
+ adcs r0, r12, #0
+ str r0, [r4, #76]
+ adcs r0, r3, #0
+ str r0, [r4, #80]
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [r4, #84]
+ adcs r0, r2, #0
+ adc r1, r1, #0
+ str r0, [r4, #88]
+ str r1, [r4, #92]
+ add sp, sp, #196
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end176:
+ .size mcl_fpDbl_mulPre12L, .Lfunc_end176-mcl_fpDbl_mulPre12L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre12L
+ .align 2
+ .type mcl_fpDbl_sqrPre12L,%function
+mcl_fpDbl_sqrPre12L: @ @mcl_fpDbl_sqrPre12L
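+@ 24-limb square of a 12-limb operand (r0 = result, r1 = x); uses the same
+@ three-way 6-limb split as mcl_fpDbl_mulPre12L, with both multiplicands
+@ taken from r1.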
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #204
+ sub sp, sp, #204
+ mov r5, r1
+ mov r4, r0
+ mov r2, r5
+ bl mcl_fpDbl_mulPre6L(PLT)
+ add r1, r5, #24
+ add r0, r4, #48
+ mov r2, r1
+ bl mcl_fpDbl_mulPre6L(PLT)
+ ldr r10, [r5, #32]
+ ldr r9, [r5, #36]
+ ldr lr, [r5, #40]
+ ldr r12, [r5, #44]
+ ldr r3, [r5, #8]
+ ldr r2, [r5, #12]
+ ldr r1, [r5, #16]
+ ldr r11, [r5, #20]
+ ldr r6, [r5, #24]
+ ldr r0, [r5, #28]
+ ldm r5, {r5, r7}
+ adds r8, r5, r6
+ adcs r6, r7, r0
+ mov r0, #0
+ str r8, [sp, #132]
+ str r8, [sp, #108]
+ adcs r10, r3, r10
+ str r6, [sp, #136]
+ str r6, [sp, #112]
+ adcs r5, r2, r9
+ add r2, sp, #108
+ str r10, [sp, #140]
+ str r10, [sp, #116]
+ adcs r9, r1, lr
+ add r1, sp, #132
+ str r5, [sp, #144]
+ str r5, [sp, #120]
+ adcs r7, r11, r12
+ str r9, [sp, #148]
+ str r9, [sp, #124]
+ adc r11, r0, #0
+ add r0, sp, #156
+ str r7, [sp, #152]
+ str r7, [sp, #128]
+ bl mcl_fpDbl_mulPre6L(PLT)
+ adds r0, r9, r9
+ ldr lr, [sp, #192]
+ ldr r12, [sp, #196]
+ ldr r9, [sp, #200]
+ orr r0, r0, r5, lsr #31
+ str r0, [sp, #104] @ 4-byte Spill
+ adc r0, r7, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ adds r0, r10, r10
+ ldr r10, [sp, #180]
+ adc r1, r5, r5
+ orr r0, r0, r6, lsr #31
+ str r1, [sp, #92] @ 4-byte Spill
+ adds r1, r8, r8
+ ldr r8, [sp, #184]
+ adc r5, r6, r6
+ ldr r6, [sp, #188]
+ adds r1, r10, r1
+ str r1, [sp, #96] @ 4-byte Spill
+ adcs r3, r8, r5
+ ldr r5, [sp, #100] @ 4-byte Reload
+ adcs r2, r6, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r1, lr, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r12, r0
+ adcs r5, r9, r5
+ adc r7, r11, r7, lsr #31
+ cmp r11, #0
+ moveq r3, r8
+ moveq r2, r6
+ moveq r5, r9
+ moveq r0, r12
+ moveq r1, lr
+ cmp r11, #0
+ ldr r6, [sp, #96] @ 4-byte Reload
+ mov r8, r3
+ add r3, sp, #156
+ str r0, [sp, #104] @ 4-byte Spill
+ str r1, [sp, #100] @ 4-byte Spill
+ str r2, [sp, #88] @ 4-byte Spill
+ mov r9, r5
+ ldm r4, {r12, lr}
+ moveq r7, r11
+ ldr r11, [r4, #8]
+ ldr r5, [r4, #12]
+ moveq r6, r10
+ ldm r3, {r0, r1, r2, r3}
+ ldr r10, [r4, #64]
+ subs r12, r0, r12
+ ldr r0, [r4, #16]
+ sbcs lr, r1, lr
+ ldr r1, [sp, #172]
+ sbcs r2, r2, r11
+ ldr r11, [r4, #48]
+ sbcs r3, r3, r5
+ ldr r5, [r4, #68]
+ sbcs r0, r1, r0
+ ldr r1, [sp, #176]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r4, #20]
+ str r5, [sp, #60] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r4, #24]
+ str r0, [sp, #96] @ 4-byte Spill
+ sbcs r0, r6, r0
+ ldr r6, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r4, #28]
+ str r0, [sp, #72] @ 4-byte Spill
+ sbcs r0, r8, r0
+ ldr r8, [r4, #56]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r4, #32]
+ str r0, [sp, #92] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r4, #36]
+ str r0, [sp, #88] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [r4, #40]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ sbcs r0, r0, r1
+ ldr r1, [r4, #92]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r4, #44]
+ str r1, [sp, #84] @ 4-byte Spill
+ str r0, [sp, #104] @ 4-byte Spill
+ sbcs r0, r9, r0
+ ldr r9, [r4, #60]
+ str r0, [sp, #40] @ 4-byte Spill
+ sbc r0, r7, #0
+ ldr r7, [r4, #52]
+ str r0, [sp, #36] @ 4-byte Spill
+ subs r0, r12, r11
+ ldr r12, [r4, #76]
+ str r0, [sp, #32] @ 4-byte Spill
+ sbcs r0, lr, r7
+ ldr lr, [r4, #72]
+ str r0, [sp, #28] @ 4-byte Spill
+ sbcs r0, r2, r8
+ ldr r2, [r4, #84]
+ str r0, [sp, #24] @ 4-byte Spill
+ sbcs r0, r3, r9
+ ldr r3, [r4, #80]
+ sbcs r6, r6, r10
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r4, #88]
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [sp, #68] @ 4-byte Reload
+ str r2, [sp, #80] @ 4-byte Spill
+ sbcs r5, r6, r5
+ str r5, [sp, #8] @ 4-byte Spill
+ ldr r5, [sp, #64] @ 4-byte Reload
+ sbcs r5, r5, lr
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [sp, #56] @ 4-byte Reload
+ sbcs r5, r5, r12
+ str r5, [sp, #16] @ 4-byte Spill
+ ldr r5, [sp, #52] @ 4-byte Reload
+ sbcs r5, r5, r3
+ str r5, [sp, #52] @ 4-byte Spill
+ ldr r5, [sp, #48] @ 4-byte Reload
+ sbcs r2, r5, r2
+ ldr r5, [sp, #28] @ 4-byte Reload
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [sp, #44] @ 4-byte Reload
+ sbcs r2, r2, r0
+ str r2, [sp, #64] @ 4-byte Spill
+ mov r2, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ sbc r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adds r0, r0, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r0, [r4, #24]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r6, r1, r5
+ ldr r1, [sp, #24] @ 4-byte Reload
+ ldr r5, [sp, #20] @ 4-byte Reload
+ str r6, [r4, #28]
+ adcs r0, r0, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [r4, #32]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r6, r1, r5
+ ldr r1, [sp, #4] @ 4-byte Reload
+ ldr r5, [sp, #8] @ 4-byte Reload
+ str r6, [r4, #36]
+ adcs r0, r0, r1
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r0, [r4, #40]
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r5, r1, r5
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r5, [r4, #44]
+ str r0, [r4, #48]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r1, r7, r1
+ str r1, [r4, #52]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [r4, #56]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r1, r9, r1
+ str r1, [r4, #60]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [r4, #64]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [r4, #68]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [r4, #72]
+ adcs r0, r12, #0
+ str r0, [r4, #76]
+ adcs r0, r3, #0
+ str r0, [r4, #80]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [r4, #84]
+ adcs r0, r2, #0
+ adc r1, r1, #0
+ str r0, [r4, #88]
+ str r1, [r4, #92]
+ add sp, sp, #204
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end177:
+ .size mcl_fpDbl_sqrPre12L, .Lfunc_end177-mcl_fpDbl_sqrPre12L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont12L
+ .align 2
+ .type mcl_fp_mont12L,%function
+mcl_fp_mont12L: @ @mcl_fp_mont12L
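+@ Montgomery multiplication for 12-limb operands (r0 = z, r1 = x, r2 = y, r3 = p).
+@ The word at [r3, #-4] is presumably the precomputed -p^(-1) mod 2^32; each
+@ iteration multiplies x by one limb of y via .LmulPv384x32, then adds q*p to
+@ cancel the low limb (word-serial, CIOS-style reduction) and conditionally
+@ subtracts p at the end.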
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #428
+ sub sp, sp, #428
+ .pad #1024
+ sub sp, sp, #1024
+ str r2, [sp, #92] @ 4-byte Spill
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, sp, #1392
+ str r3, [sp, #100] @ 4-byte Spill
+ str r1, [sp, #96] @ 4-byte Spill
+ mov r4, r3
+ str r5, [sp, #88] @ 4-byte Spill
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1396]
+ ldr r6, [sp, #1392]
+ add r11, sp, #1024
+ mov r1, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1400]
+ mul r2, r6, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1404]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1440]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1436]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1432]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1428]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #1424]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1420]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1416]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1412]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1408]
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, r11, #312
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1384]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r5, [sp, #1360]
+ ldr r8, [sp, #1356]
+ ldr r7, [sp, #1352]
+ ldr r10, [sp, #1336]
+ ldr r9, [sp, #1340]
+ ldr r4, [sp, #1344]
+ ldr r11, [sp, #1348]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1380]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1376]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1372]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1368]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1364]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, sp, #1280
+ bl .LmulPv384x32(PLT)
+ adds r0, r10, r6
+ ldr r1, [sp, #64] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ ldr r3, [sp, #1296]
+ ldr r12, [sp, #1300]
+ ldr lr, [sp, #1304]
+ ldr r6, [sp, #1312]
+ ldr r10, [sp, #1328]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #1324]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #1280]
+ adcs r1, r11, r1
+ ldr r11, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r7, r1
+ ldr r7, [sp, #1316]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r8, r1
+ ldr r8, [sp, #1320]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r5, r1
+ ldr r5, [sp, #1308]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #1292]
+ adc r0, r0, #0
+ adds r11, r11, r4
+ ldr r4, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #1288]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1284]
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r7
+ add r7, sp, #1024
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, r7, #200
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1272]
+ add r9, sp, #1232
+ ldr r5, [sp, #1248]
+ ldr r8, [sp, #1244]
+ ldr r10, [sp, #1224]
+ ldr r11, [sp, #1228]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1268]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1260]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1256]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1252]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r9, {r6, r7, r9}
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, sp, #1168
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #1168
+ ldr r10, [sp, #1212]
+ ldr r4, [sp, #1192]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #1216]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1200]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1208]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1204]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1196]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ add r5, sp, #1024
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, r5, #88
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1160]
+ add r10, sp, #1120
+ ldr r6, [sp, #1136]
+ ldr r9, [sp, #1132]
+ ldr r11, [sp, #1112]
+ ldr r7, [sp, #1116]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1156]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1152]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1140]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #1056
+ bl .LmulPv384x32(PLT)
+ adds r0, r8, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r2, [sp, #1068]
+ ldr r3, [sp, #1072]
+ ldr r12, [sp, #1076]
+ ldr lr, [sp, #1080]
+ ldr r8, [sp, #1096]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1092]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r11, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1056]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1084]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1104]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1100]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1088]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1064]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r11, r11, r4
+ ldr r4, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1060]
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #1000
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1048]
+ add r9, sp, #1008
+ ldr r5, [sp, #1024]
+ ldr r8, [sp, #1020]
+ ldr r10, [sp, #1000]
+ ldr r11, [sp, #1004]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1032]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r9, {r6, r7, r9}
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, sp, #944
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #944
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #968
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #888
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #936]
+ add r10, sp, #896
+ ldr r6, [sp, #912]
+ ldr r9, [sp, #908]
+ ldr r11, [sp, #888]
+ ldr r7, [sp, #892]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #932]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #920]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #916]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #832
+ bl .LmulPv384x32(PLT)
+ adds r0, r8, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #836
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #860
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldr r4, [sp, #832]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #84] @ 4-byte Reload
+ adds r11, r11, r4
+ ldr r4, [sp, #80] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #776
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #824]
+ add r9, sp, #784
+ ldr r5, [sp, #800]
+ ldr r8, [sp, #796]
+ ldr r10, [sp, #776]
+ ldr r11, [sp, #780]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #820]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #816]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #812]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r9, {r6, r7, r9}
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #720
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #720
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #744
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #664
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #712]
+ add r10, sp, #672
+ ldr r6, [sp, #688]
+ ldr r9, [sp, #684]
+ ldr r11, [sp, #664]
+ ldr r7, [sp, #668]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #608
+ bl .LmulPv384x32(PLT)
+ adds r0, r8, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #612
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #636
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldr r4, [sp, #608]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #84] @ 4-byte Reload
+ adds r11, r11, r4
+ ldr r4, [sp, #80] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #552
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #600]
+ add r9, sp, #560
+ ldr r5, [sp, #576]
+ ldr r8, [sp, #572]
+ ldr r10, [sp, #552]
+ ldr r11, [sp, #556]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #596]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #592]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #588]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r9, {r6, r7, r9}
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #496
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #496
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #520
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #440
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #488]
+ add r10, sp, #448
+ ldr r6, [sp, #464]
+ ldr r9, [sp, #460]
+ ldr r11, [sp, #440]
+ ldr r7, [sp, #444]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #484]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #480]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #476]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #472]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #468]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #384
+ bl .LmulPv384x32(PLT)
+ adds r0, r8, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #388
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #412
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldr r4, [sp, #384]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #84] @ 4-byte Reload
+ adds r11, r11, r4
+ ldr r4, [sp, #80] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ mul r2, r11, r6
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #328
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #376]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r5, [sp, #348]
+ ldr r9, [sp, #344]
+ ldr r10, [sp, #328]
+ ldr r11, [sp, #332]
+ ldr r8, [sp, #336]
+ ldr r7, [sp, #340]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #372]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #364]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #360]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #356]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #352]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #272
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r10
+ ldr r2, [sp, #4] @ 4-byte Reload
+ add r12, sp, #288
+ ldr lr, [sp, #276]
+ ldr r4, [sp, #284]
+ ldr r10, [sp, #312]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r1, r0, r11
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #316]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #320]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #280]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #272]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ adds r0, r1, r2
+ mul r11, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r6, [sp, #308]
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r7, [sp, #80] @ 4-byte Reload
+ adcs r7, r7, lr
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adcs r7, r7, r5
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adcs r7, r7, r4
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ mov r2, r11
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #216
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #264]
+ add r10, sp, #220
+ ldr r6, [sp, #244]
+ ldr r7, [sp, #240]
+ ldr r8, [sp, #236]
+ ldr r9, [sp, #232]
+ ldr r11, [sp, #216]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #256]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #252]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #248]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #160
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #160
+ add r12, sp, #176
+ adds r0, r0, r11
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r4, r0, r4
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r11, r0, r5
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #196
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldm lr, {r2, r7, lr}
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r6, [sp, #172]
+ adds r4, r4, r2
+ mul r1, r4, r0
+ adcs r7, r11, r7
+ str r1, [sp, #44] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldm r12, {r0, r1, r2, r3, r12}
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r11, r7, lr
+ ldr r7, [sp, #92] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #100] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r7, r0, r5
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r8, r0, r8
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r10, r0, r10
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ add r0, sp, #104
+ bl .LmulPv384x32(PLT)
+ add r5, sp, #104
+ mov r3, r6
+ ldm r5, {r0, r1, r2, r5}
+ adds r0, r4, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs lr, r0, r1
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r4, r11, r2
+ str lr, [sp, #44] @ 4-byte Spill
+ str r4, [sp, #48] @ 4-byte Spill
+ adcs r2, r0, r5
+ ldr r0, [sp, #120]
+ str r2, [sp, #52] @ 4-byte Spill
+ adcs r5, r1, r0
+ ldr r0, [sp, #124]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r5, [sp, #56] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #128]
+ adcs r0, r1, r0
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #132]
+ adcs r12, r1, r0
+ ldr r0, [sp, #136]
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r12, [sp, #60] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #140]
+ adcs r0, r7, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #144]
+ adcs r0, r8, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #148]
+ adcs r0, r1, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #152]
+ adcs r0, r10, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldmib r3, {r0, r1, r7, r10}
+ ldr r11, [r3]
+ ldr r6, [r3, #24]
+ ldr r9, [r3, #20]
+ ldr r8, [r3, #36]
+ subs r11, lr, r11
+ str r6, [sp, #36] @ 4-byte Spill
+ ldr r6, [r3, #28]
+ ldr lr, [r3, #44]
+ sbcs r0, r4, r0
+ ldr r4, [sp, #72] @ 4-byte Reload
+ sbcs r1, r2, r1
+ sbcs r2, r5, r7
+ ldr r7, [r3, #32]
+ ldr r5, [r3, #40]
+ ldr r3, [sp, #80] @ 4-byte Reload
+ str r6, [sp, #40] @ 4-byte Spill
+ sbcs r10, r3, r10
+ ldr r3, [sp, #84] @ 4-byte Reload
+ sbcs r6, r3, r9
+ ldr r3, [sp, #36] @ 4-byte Reload
+ ldr r9, [sp, #40] @ 4-byte Reload
+ sbcs r3, r12, r3
+ ldr r12, [sp, #88] @ 4-byte Reload
+ sbcs r12, r12, r9
+ sbcs r7, r4, r7
+ ldr r4, [sp, #76] @ 4-byte Reload
+ str r7, [sp, #100] @ 4-byte Spill
+ ldr r7, [sp, #48] @ 4-byte Reload
+ sbcs r9, r4, r8
+ ldr r4, [sp, #96] @ 4-byte Reload
+ sbcs r8, r4, r5
+ ldr r4, [sp, #92] @ 4-byte Reload
+ ldr r5, [sp, #44] @ 4-byte Reload
+ sbcs lr, r4, lr
+ ldr r4, [sp, #64] @ 4-byte Reload
+ sbc r4, r4, #0
+ ands r4, r4, #1
+ movne r11, r5
+ ldr r5, [sp, #68] @ 4-byte Reload
+ movne r0, r7
+ str r11, [r5]
+ str r0, [r5, #4]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ cmp r4, #0
+ str r1, [r5, #8]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ movne r2, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r2, [r5, #12]
+ movne r10, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r10, [r5, #16]
+ movne r6, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ cmp r4, #0
+ str r6, [r5, #20]
+ movne r3, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r3, [r5, #24]
+ movne r12, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r12, [r5, #28]
+ movne r1, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ cmp r4, #0
+ str r1, [r5, #32]
+ movne r9, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r9, [r5, #36]
+ movne r8, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r8, [r5, #40]
+ movne lr, r0
+ str lr, [r5, #44]
+ add sp, sp, #428
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end178:
+ .size mcl_fp_mont12L, .Lfunc_end178-mcl_fp_mont12L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF12L
+ .align 2
+ .type mcl_fp_montNF12L,%function
+mcl_fp_montNF12L: @ @mcl_fp_montNF12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #428
+ sub sp, sp, #428
+ .pad #1024
+ sub sp, sp, #1024
+ add r12, sp, #92
+ mov r4, r3
+ mov r7, r1
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, sp, #1392
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #88] @ 4-byte Spill
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1396]
+ ldr r8, [sp, #1392]
+ add r10, sp, #1024
+ mov r1, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1400]
+ mul r2, r8, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1404]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1440]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1436]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1432]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1428]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #1424]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1420]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1416]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1412]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1408]
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, r10, #312
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1384]
+ add r11, sp, #1344
+ ldr r9, [sp, #1356]
+ ldr r4, [sp, #1336]
+ ldr r6, [sp, #1340]
+ mov r1, r7
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1380]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1376]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1372]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1368]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1364]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1360]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r10, r11}
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, sp, #1280
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r8
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #1280
+ ldr r7, [sp, #1316]
+ ldr r4, [sp, #1304]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r8, r6, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r6, [sp, #1312]
+ adcs r0, r5, r0
+ ldr r5, [sp, #1308]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #1324]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r11, r0
+ ldr r11, [sp, #1328]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #1320]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adc r0, r1, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r8, r8, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ add r5, sp, #1024
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, r5, #200
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1272]
+ add r10, sp, #1232
+ ldr r6, [sp, #1248]
+ ldr r9, [sp, #1244]
+ ldr r11, [sp, #1224]
+ ldr r7, [sp, #1228]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1268]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1260]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1256]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1252]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, sp, #1168
+ bl .LmulPv384x32(PLT)
+ adds r0, r8, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1180]
+ ldr r3, [sp, #1184]
+ ldr r12, [sp, #1188]
+ ldr lr, [sp, #1192]
+ ldr r8, [sp, #1208]
+ ldr r11, [sp, #1216]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1204]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1168]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1196]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1212]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1200]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r10, r10, r4
+ ldr r4, [sp, #80] @ 4-byte Reload
+ ldr r1, [sp, #1176]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1172]
+ adcs r0, r4, r0
+ mov r4, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ add r7, sp, #1024
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r10, r0
+ add r0, r7, #88
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1160]
+ add r9, sp, #1120
+ ldr r5, [sp, #1136]
+ ldr r8, [sp, #1132]
+ ldr r10, [sp, #1112]
+ ldr r11, [sp, #1116]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1156]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1152]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1140]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r9, {r6, r7, r9}
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #1056
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #1056
+ ldr r10, [sp, #1100]
+ ldr r4, [sp, #1080]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #1104]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1088]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1096]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1092]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1084]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1000
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #1048]
+ add r10, sp, #1008
+ ldr r6, [sp, #1024]
+ ldr r9, [sp, #1020]
+ ldr r11, [sp, #1000]
+ ldr r7, [sp, #1004]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1032]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, sp, #944
+ bl .LmulPv384x32(PLT)
+ adds r0, r8, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add r11, sp, #972
+ add lr, sp, #948
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r5, r6, r7, r8, r9, r11}
+ ldr r4, [sp, #944]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r10, [sp, #84] @ 4-byte Reload
+ adds r10, r10, r4
+ ldr r4, [sp, #80] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r10, r0
+ add r0, sp, #888
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #936]
+ add r9, sp, #896
+ ldr r5, [sp, #912]
+ ldr r8, [sp, #908]
+ ldr r10, [sp, #888]
+ ldr r11, [sp, #892]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #932]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #920]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #916]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r9, {r6, r7, r9}
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #832
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #832
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #856
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #776
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #824]
+ add r10, sp, #784
+ ldr r6, [sp, #800]
+ ldr r9, [sp, #796]
+ ldr r11, [sp, #776]
+ ldr r7, [sp, #780]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #820]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #816]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #812]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #720
+ bl .LmulPv384x32(PLT)
+ adds r0, r8, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add r11, sp, #748
+ add lr, sp, #724
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r5, r6, r7, r8, r9, r11}
+ ldr r4, [sp, #720]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r10, [sp, #84] @ 4-byte Reload
+ adds r10, r10, r4
+ ldr r4, [sp, #80] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r10, r0
+ add r0, sp, #664
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #712]
+ add r9, sp, #672
+ ldr r5, [sp, #688]
+ ldr r8, [sp, #684]
+ ldr r10, [sp, #664]
+ ldr r11, [sp, #668]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r9, {r6, r7, r9}
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #608
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #608
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #632
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #552
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #600]
+ add r10, sp, #560
+ ldr r6, [sp, #576]
+ ldr r9, [sp, #572]
+ ldr r11, [sp, #552]
+ ldr r7, [sp, #556]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #596]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #592]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #588]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #496
+ bl .LmulPv384x32(PLT)
+ adds r0, r8, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add r11, sp, #524
+ add lr, sp, #500
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r5, r6, r7, r8, r9, r11}
+ ldr r4, [sp, #496]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r10, [sp, #84] @ 4-byte Reload
+ adds r10, r10, r4
+ ldr r4, [sp, #80] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r2, r10, r0
+ add r0, sp, #440
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #488]
+ add r9, sp, #448
+ ldr r5, [sp, #464]
+ ldr r8, [sp, #460]
+ ldr r10, [sp, #440]
+ ldr r11, [sp, #444]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #484]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #480]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #476]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #472]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #468]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r9, {r6, r7, r9}
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #384
+ bl .LmulPv384x32(PLT)
+ adds r0, r4, r10
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #384
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #408
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mul r2, r7, r4
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r11, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #328
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #376]
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r6, [sp, #348]
+ ldr r10, [sp, #344]
+ ldr r11, [sp, #328]
+ ldr r7, [sp, #332]
+ ldr r9, [sp, #336]
+ ldr r5, [sp, #340]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #372]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #364]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #360]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #356]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #352]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #272
+ bl .LmulPv384x32(PLT)
+ adds r0, r8, r11
+ ldr r1, [sp, #80] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ ldr lr, [sp, #276]
+ add r12, sp, #288
+ ldr r8, [sp, #316]
+ ldr r11, [sp, #312]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ adcs r7, r1, r9
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r9, [sp, #320]
+ adcs r1, r1, r5
+ ldr r5, [sp, #280]
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r10
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #284]
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adc r1, r1, r2
+ ldr r2, [sp, #272]
+ str r1, [sp, #36] @ 4-byte Spill
+ adds r0, r0, r2
+ adcs r7, r7, lr
+ mul r10, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r4, [sp, #308]
+ ldm r12, {r0, r1, r2, r3, r12}
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adcs r7, r7, r5
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adcs r7, r7, r6
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ mov r2, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ adc r0, r9, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #216
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #264]
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r5, [sp, #244]
+ ldr r6, [sp, #240]
+ ldr r8, [sp, #236]
+ ldr r9, [sp, #232]
+ ldr r10, [sp, #216]
+ ldr r7, [sp, #220]
+ ldr r4, [sp, #224]
+ ldr r11, [sp, #228]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #256]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #252]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #248]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #160
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add r12, sp, #176
+ ldr lr, [sp, #164]
+ adds r0, r0, r10
+ add r10, sp, #200
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #172]
+ adcs r1, r1, r4
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #168]
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adc r1, r1, r2
+ ldr r2, [sp, #160]
+ str r1, [sp, #48] @ 4-byte Spill
+ adds r4, r0, r2
+ ldr r0, [sp, #88] @ 4-byte Reload
+ mul r1, r4, r0
+ str r1, [sp, #44] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r11, [sp, #196]
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r5, [sp, #96] @ 4-byte Reload
+ adcs r5, r5, lr
+ str r5, [sp, #36] @ 4-byte Spill
+ ldr r5, [sp, #92] @ 4-byte Reload
+ adcs r6, r5, r6
+ ldr r5, [sp, #100] @ 4-byte Reload
+ str r6, [sp, #32] @ 4-byte Spill
+ ldr r6, [sp, #84] @ 4-byte Reload
+ adcs r7, r6, r7
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r11, r0, r11
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r8, r0, r8
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r9, r0, r9
+ adc r0, r10, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ add r0, sp, #104
+ bl .LmulPv384x32(PLT)
+ add r6, sp, #104
+ ldm r6, {r0, r1, r2, r6}
+ adds r0, r4, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs lr, r0, r1
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r10, r0, r2
+ ldr r0, [sp, #120]
+ mov r2, r5
+ adcs r3, r7, r6
+ str r10, [sp, #52] @ 4-byte Spill
+ str r3, [sp, #56] @ 4-byte Spill
+ adcs r6, r1, r0
+ ldr r0, [sp, #124]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r6, [sp, #60] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #128]
+ adcs r0, r1, r0
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #132]
+ adcs r12, r1, r0
+ ldr r0, [sp, #136]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r12, [sp, #64] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #140]
+ adcs r0, r11, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #144]
+ adcs r0, r8, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #148]
+ adcs r0, r9, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #152]
+ adc r0, r1, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldmib r2, {r0, r1, r7, r9}
+ ldr r4, [r2, #24]
+ ldr r8, [r2]
+ ldr r5, [r2, #20]
+ str r4, [sp, #44] @ 4-byte Spill
+ ldr r4, [r2, #28]
+ subs r8, lr, r8
+ sbcs r0, r10, r0
+ sbcs r1, r3, r1
+ sbcs r7, r6, r7
+ str r4, [sp, #48] @ 4-byte Spill
+ mov r4, r2
+ ldr r2, [r4, #44]
+ ldr r10, [r4, #32]
+ ldr r6, [r4, #36]
+ ldr r11, [r4, #40]
+ ldr r4, [sp, #48] @ 4-byte Reload
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [sp, #76] @ 4-byte Reload
+ sbcs r9, r2, r9
+ ldr r2, [sp, #80] @ 4-byte Reload
+ sbcs r5, r2, r5
+ ldr r2, [sp, #44] @ 4-byte Reload
+ sbcs r3, r12, r2
+ ldr r2, [sp, #84] @ 4-byte Reload
+ sbcs r12, r2, r4
+ ldr r2, [sp, #88] @ 4-byte Reload
+ ldr r4, [sp, #40] @ 4-byte Reload
+ sbcs r10, r2, r10
+ ldr r2, [sp, #72] @ 4-byte Reload
+ sbcs r2, r2, r6
+ ldr r6, [sp, #52] @ 4-byte Reload
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [sp, #96] @ 4-byte Reload
+ sbcs r2, r2, r11
+ ldr r11, [sp, #68] @ 4-byte Reload
+ str r2, [sp, #100] @ 4-byte Spill
+ ldr r2, [sp, #92] @ 4-byte Reload
+ sbc r2, r2, r4
+ asr r4, r2, #31
+ cmp r4, #0
+ movlt r8, lr
+ movlt r0, r6
+ str r8, [r11]
+ str r0, [r11, #4]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ movlt r1, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ cmp r4, #0
+ str r1, [r11, #8]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ movlt r7, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r7, [r11, #12]
+ movlt r9, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r9, [r11, #16]
+ movlt r5, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ cmp r4, #0
+ str r5, [r11, #20]
+ movlt r3, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r3, [r11, #24]
+ ldr r3, [sp, #48] @ 4-byte Reload
+ movlt r12, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r12, [r11, #28]
+ movlt r10, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ cmp r4, #0
+ str r10, [r11, #32]
+ movlt r3, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r3, [r11, #36]
+ movlt r1, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r1, [r11, #40]
+ movlt r2, r0
+ str r2, [r11, #44]
+ add sp, sp, #428
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end179:
+ .size mcl_fp_montNF12L, .Lfunc_end179-mcl_fp_montNF12L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed12L
+ .align 2
+ .type mcl_fp_montRed12L,%function
+mcl_fp_montRed12L: @ @mcl_fp_montRed12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #836
+ sub sp, sp, #836
+ mov r3, r2
+ str r0, [sp, #148] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r10, [r1]
+ ldr r0, [r3]
+ str r3, [sp, #152] @ 4-byte Spill
+ mov r5, r3
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #144] @ 4-byte Spill
+ ldr r0, [r3, #4]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [r3, #8]
+ str r2, [sp, #56] @ 4-byte Spill
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [r3, #12]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [r3, #20]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [r3, #24]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [r3, #-4]
+ str r0, [sp, #156] @ 4-byte Spill
+ mul r2, r10, r0
+ ldr r0, [r3, #28]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r1, #72]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #76]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r1, #80]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #84]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #88]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r1, #92]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #20]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ mov r1, r3
+ str r0, [sp, #8] @ 4-byte Spill
+ add r0, sp, #776
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #824]
+ add r11, sp, #808
+ add lr, sp, #776
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r6, r8, r9, r11}
+ ldr r7, [sp, #804]
+ ldr r4, [sp, #800]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r10, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #156] @ 4-byte Reload
+ mul r2, r10, r0
+ add r0, sp, #720
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #768]
+ add lr, sp, #756
+ add r9, sp, #732
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r4, [sp, #720]
+ ldr r6, [sp, #752]
+ ldr r11, [sp, #748]
+ ldr r2, [sp, #744]
+ ldr r1, [sp, #724]
+ ldr r7, [sp, #728]
+ ldm r9, {r0, r8, r9}
+ adds r4, r10, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r10, r4, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ mov r4, r5
+ adcs r1, r1, r7
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #156] @ 4-byte Reload
+ mul r2, r10, r0
+ add r0, sp, #664
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #712]
+ add r11, sp, #696
+ add lr, sp, #664
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r6, r8, r9, r11}
+ ldr r7, [sp, #692]
+ ldr r5, [sp, #688]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r10, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #156] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ mul r2, r10, r5
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #608
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #656]
+ add lr, sp, #644
+ add r9, sp, #620
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r4, [sp, #608]
+ ldr r6, [sp, #640]
+ ldr r11, [sp, #636]
+ ldr r2, [sp, #632]
+ ldr r1, [sp, #612]
+ ldr r7, [sp, #616]
+ ldm r9, {r0, r8, r9}
+ adds r4, r10, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r10, r4, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r4, [sp, #152] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ mov r0, r5
+ mul r2, r10, r0
+ add r0, sp, #552
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #600]
+ add r11, sp, #584
+ add lr, sp, #552
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r11, {r6, r8, r9, r11}
+ ldr r7, [sp, #580]
+ ldr r5, [sp, #576]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r10, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #156] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ mul r2, r10, r5
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #496
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #544]
+ add lr, sp, #532
+ add r9, sp, #508
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r4, [sp, #496]
+ ldr r6, [sp, #528]
+ ldr r11, [sp, #524]
+ ldr r2, [sp, #520]
+ ldr r1, [sp, #500]
+ ldr r7, [sp, #504]
+ ldm r9, {r0, r8, r9}
+ adds r4, r10, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r10, r4, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r4, [sp, #152] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #440
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #488]
+ add r11, sp, #472
+ add lr, sp, #440
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r6, r8, r9, r11}
+ ldr r7, [sp, #468]
+ ldr r5, [sp, #464]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r10, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #156] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ mul r2, r10, r5
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #384
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #432]
+ add lr, sp, #420
+ add r9, sp, #396
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r4, [sp, #384]
+ ldr r6, [sp, #416]
+ ldr r11, [sp, #412]
+ ldr r2, [sp, #408]
+ ldr r1, [sp, #388]
+ ldr r7, [sp, #392]
+ ldm r9, {r0, r8, r9}
+ adds r4, r10, r4
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r10, r4, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ mov r4, r5
+ adcs r1, r1, r7
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #152] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #328
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #376]
+ add r11, sp, #352
+ add lr, sp, #328
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #372]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r5, r7, r8, r9, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r10, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ mov r5, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #272
+ bl .LmulPv384x32(PLT)
+ ldr r0, [sp, #320]
+ add lr, sp, #300
+ add r6, sp, #272
+ add r12, sp, #284
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm lr, {r4, r8, r9, r11, lr}
+ ldr r7, [sp, #296]
+ ldm r6, {r2, r3, r6}
+ ldm r12, {r0, r1, r12}
+ adds r2, r10, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r10, r2, r3
+ ldr r2, [sp, #60] @ 4-byte Reload
+ adcs r6, r2, r6
+ ldr r2, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #156] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r2, r10, r4
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ mov r11, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #216
+ bl .LmulPv384x32(PLT)
+ add r7, sp, #216
+ add lr, sp, #252
+ ldm r7, {r0, r1, r3, r7}
+ ldr r8, [sp, #264]
+ adds r0, r10, r0
+ adcs r10, r6, r1
+ mul r0, r10, r4
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #156] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ add r7, sp, #232
+ str r0, [sp, #52] @ 4-byte Spill
+ ldm lr, {r6, r12, lr}
+ ldm r7, {r0, r1, r2, r3, r7}
+ ldr r4, [sp, #96] @ 4-byte Reload
+ adcs r9, r4, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r11
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r4, r0, r3
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r5, r0, r7
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r6, r0, r6
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ add r0, sp, #160
+ bl .LmulPv384x32(PLT)
+ add r3, sp, #160
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r10, r0
+ ldr r0, [sp, #156] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ adcs r12, r0, r2
+ ldr r2, [sp, #176]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r3, r9, r3
+ str r12, [sp, #52] @ 4-byte Spill
+ str r3, [sp, #56] @ 4-byte Spill
+ adcs r7, r0, r2
+ ldr r2, [sp, #180]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ str r7, [sp, #60] @ 4-byte Spill
+ adcs r8, r0, r2
+ ldr r2, [sp, #184]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r8, [sp, #64] @ 4-byte Spill
+ adcs r4, r4, r2
+ ldr r2, [sp, #188]
+ str r4, [sp, #68] @ 4-byte Spill
+ adcs r5, r5, r2
+ ldr r2, [sp, #192]
+ str r5, [sp, #72] @ 4-byte Spill
+ adcs r6, r6, r2
+ ldr r2, [sp, #196]
+ str r6, [sp, #76] @ 4-byte Spill
+ adcs r9, r0, r2
+ ldr r2, [sp, #200]
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r9, [sp, #84] @ 4-byte Spill
+ adcs r10, r0, r2
+ ldr r2, [sp, #204]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r10, [sp, #96] @ 4-byte Spill
+ adcs lr, r0, r2
+ ldr r2, [sp, #208]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str lr, [sp, #156] @ 4-byte Spill
+ adcs r11, r0, r2
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r2, [sp, #136] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #144] @ 4-byte Reload
+ subs r0, r1, r0
+ ldr r1, [sp, #140] @ 4-byte Reload
+ sbcs r1, r12, r1
+ sbcs r2, r3, r2
+ ldr r3, [sp, #120] @ 4-byte Reload
+ sbcs r3, r7, r3
+ ldr r7, [sp, #124] @ 4-byte Reload
+ sbcs r12, r8, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ sbcs r7, r4, r7
+ ldr r4, [sp, #132] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #100] @ 4-byte Reload
+ sbcs r8, r6, r5
+ ldr r6, [sp, #104] @ 4-byte Reload
+ sbcs r5, r9, r6
+ ldr r6, [sp, #108] @ 4-byte Reload
+ str r5, [sp, #144] @ 4-byte Spill
+ ldr r5, [sp, #92] @ 4-byte Reload
+ sbcs r9, r10, r6
+ ldr r6, [sp, #112] @ 4-byte Reload
+ sbcs r6, lr, r6
+ mov lr, r11
+ ldr r11, [sp, #148] @ 4-byte Reload
+ str r6, [sp, #152] @ 4-byte Spill
+ ldr r6, [sp, #116] @ 4-byte Reload
+ sbcs r10, lr, r6
+ sbc r6, r5, #0
+ ldr r5, [sp, #48] @ 4-byte Reload
+ ands r6, r6, #1
+ movne r0, r5
+ str r0, [r11]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r1, [r11, #4]
+ ldr r1, [sp, #156] @ 4-byte Reload
+ movne r2, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ cmp r6, #0
+ str r2, [r11, #8]
+ ldr r2, [sp, #144] @ 4-byte Reload
+ movne r3, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r3, [r11, #12]
+ movne r12, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r12, [r11, #16]
+ movne r7, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ cmp r6, #0
+ str r7, [r11, #20]
+ movne r4, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r4, [r11, #24]
+ movne r8, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r8, [r11, #28]
+ movne r2, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ cmp r6, #0
+ movne r10, lr
+ str r2, [r11, #32]
+ movne r9, r0
+ ldr r0, [sp, #152] @ 4-byte Reload
+ movne r0, r1
+ str r9, [r11, #36]
+ str r0, [r11, #40]
+ str r10, [r11, #44]
+ add sp, sp, #836
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end180:
+ .size mcl_fp_montRed12L, .Lfunc_end180-mcl_fp_montRed12L
+ .cantunwind
+ .fnend
+
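+@ annotation (inferred from the symbol name and the code below; not part of the
+@ original generated output): 12-limb (384-bit) addition without modular reduction;
+@ the sum is stored at [r0] and the final carry bit is returned in r0.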
+ .globl mcl_fp_addPre12L
+ .align 2
+ .type mcl_fp_addPre12L,%function
+mcl_fp_addPre12L: @ @mcl_fp_addPre12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #28
+ sub sp, sp, #28
+ ldm r1, {r3, r12, lr}
+ ldr r9, [r1, #12]
+ ldmib r2, {r5, r6, r7}
+ ldr r4, [r2, #16]
+ ldr r11, [r2]
+ str r4, [sp] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ adds r8, r11, r3
+ ldr r3, [r2, #36]
+ ldr r11, [r2, #32]
+ adcs r5, r5, r12
+ add r12, r1, #16
+ adcs r6, r6, lr
+ add lr, r1, #32
+ adcs r7, r7, r9
+ str r4, [sp, #4] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [r2, #28]
+ ldr r2, [r2, #44]
+ str r3, [sp, #20] @ 4-byte Spill
+ str r4, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm lr, {r4, r10, lr}
+ ldr r9, [r1, #44]
+ ldm r12, {r1, r2, r3, r12}
+ str r8, [r0]
+ stmib r0, {r5, r6}
+ str r7, [r0, #12]
+ ldr r5, [sp] @ 4-byte Reload
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adcs r1, r5, r1
+ ldr r5, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r2, r5, r2
+ str r2, [r0, #20]
+ ldr r2, [sp, #12] @ 4-byte Reload
+ adcs r1, r1, r3
+ ldr r3, [sp, #20] @ 4-byte Reload
+ str r1, [r0, #24]
+ adcs r2, r2, r12
+ str r2, [r0, #28]
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r1, r11, r4
+ add r0, r0, #32
+ adcs r2, r2, r10
+ adcs r3, r3, lr
+ adcs r7, r7, r9
+ stm r0, {r1, r2, r3, r7}
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #28
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end181:
+ .size mcl_fp_addPre12L, .Lfunc_end181-mcl_fp_addPre12L
+ .cantunwind
+ .fnend
+
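+@ annotation (inferred from the symbol name and the code below; not part of the
+@ original generated output): 12-limb (384-bit) subtraction without modular reduction;
+@ the difference is stored at [r0] and the final borrow (0 or 1) is returned in r0.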
+ .globl mcl_fp_subPre12L
+ .align 2
+ .type mcl_fp_subPre12L,%function
+mcl_fp_subPre12L: @ @mcl_fp_subPre12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #28
+ sub sp, sp, #28
+ ldmib r2, {r8, r12, lr}
+ ldr r3, [r2, #16]
+ ldr r7, [r2]
+ ldr r6, [r1]
+ ldr r5, [r1, #4]
+ ldr r4, [r1, #8]
+ ldr r11, [r2, #44]
+ ldr r9, [r1, #32]
+ ldr r10, [r1, #36]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [r2, #20]
+ subs r6, r6, r7
+ ldr r7, [r2, #32]
+ sbcs r5, r5, r8
+ ldr r8, [r1, #40]
+ sbcs r4, r4, r12
+ add r12, r1, #16
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [r2, #24]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r3, [sp, #24] @ 4-byte Spill
+ ldr r3, [r1, #12]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r1, #44]
+ sbcs lr, r3, lr
+ ldm r12, {r1, r2, r3, r12}
+ str r6, [r0]
+ str r5, [r0, #4]
+ str r4, [r0, #8]
+ ldr r4, [sp, #12] @ 4-byte Reload
+ ldr r6, [sp, #16] @ 4-byte Reload
+ str lr, [r0, #12]
+ sbcs r1, r1, r4
+ str r1, [r0, #16]
+ sbcs r2, r2, r6
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ sbcs r1, r3, r1
+ ldr r3, [sp, #8] @ 4-byte Reload
+ str r1, [r0, #24]
+ sbcs r2, r12, r2
+ ldr r1, [sp] @ 4-byte Reload
+ str r2, [r0, #28]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ add r0, r0, #32
+ sbcs r1, r9, r1
+ sbcs r2, r10, r2
+ sbcs r3, r8, r3
+ sbcs r7, r7, r11
+ stm r0, {r1, r2, r3, r7}
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #28
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end182:
+ .size mcl_fp_subPre12L, .Lfunc_end182-mcl_fp_subPre12L
+ .cantunwind
+ .fnend
+
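+@ annotation (inferred from the symbol name and the code below; not part of the
+@ original generated output): one-bit logical right shift of a 12-limb (384-bit)
+@ value, read from [r1] and written to [r0].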
+ .globl mcl_fp_shr1_12L
+ .align 2
+ .type mcl_fp_shr1_12L,%function
+mcl_fp_shr1_12L: @ @mcl_fp_shr1_12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #4
+ sub sp, sp, #4
+ add r6, r1, #20
+ ldr r3, [r1, #8]
+ ldr r2, [r1, #12]
+ ldr lr, [r1, #16]
+ add r11, r1, #32
+ ldm r6, {r4, r5, r6}
+ ldm r1, {r8, r12}
+ lsr r7, r12, #1
+ orr r9, r7, r3, lsl #31
+ ldm r11, {r7, r10, r11}
+ ldr r1, [r1, #44]
+ str r1, [sp] @ 4-byte Spill
+ lsr r1, r2, #1
+ lsrs r2, r2, #1
+ rrx r2, r3
+ lsrs r3, r12, #1
+ orr r1, r1, lr, lsl #31
+ rrx r3, r8
+ stm r0, {r3, r9}
+ str r2, [r0, #8]
+ str r1, [r0, #12]
+ lsrs r1, r4, #1
+ lsr r2, r10, #1
+ rrx r1, lr
+ orr r2, r2, r11, lsl #31
+ str r1, [r0, #16]
+ lsr r1, r4, #1
+ orr r1, r1, r5, lsl #31
+ str r1, [r0, #20]
+ lsrs r1, r6, #1
+ rrx r1, r5
+ str r1, [r0, #24]
+ lsr r1, r6, #1
+ orr r1, r1, r7, lsl #31
+ str r1, [r0, #28]
+ lsrs r1, r10, #1
+ add r0, r0, #32
+ rrx r1, r7
+ ldr r7, [sp] @ 4-byte Reload
+ lsrs r3, r7, #1
+ lsr r7, r7, #1
+ rrx r3, r11
+ stm r0, {r1, r2, r3, r7}
+ add sp, sp, #4
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end183:
+ .size mcl_fp_shr1_12L, .Lfunc_end183-mcl_fp_shr1_12L
+ .cantunwind
+ .fnend
+
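+@ annotation (inferred from the symbol name and the code below; not part of the
+@ original generated output): modular addition of 12-limb field elements; the raw sum
+@ is stored at [r0], the modulus at [r3] is subtracted, and the reduced value
+@ overwrites the sum in the %nocarry path when that subtraction does not borrow.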
+ .globl mcl_fp_add12L
+ .align 2
+ .type mcl_fp_add12L,%function
+mcl_fp_add12L: @ @mcl_fp_add12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ ldm r1, {r12, lr}
+ ldr r5, [r2]
+ ldr r8, [r1, #8]
+ ldr r9, [r1, #12]
+ ldmib r2, {r4, r6, r7}
+ ldr r11, [r1, #40]
+ adds r5, r5, r12
+ ldr r12, [r2, #40]
+ adcs r4, r4, lr
+ str r5, [sp, #40] @ 4-byte Spill
+ ldr r5, [r1, #24]
+ ldr lr, [r1, #32]
+ adcs r6, r6, r8
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r1, #20]
+ ldr r8, [r1, #36]
+ adcs r7, r7, r9
+ str r6, [sp, #32] @ 4-byte Spill
+ ldr r6, [r1, #16]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #16]
+ adcs r10, r7, r6
+ ldr r6, [r2, #20]
+ adcs r7, r6, r4
+ ldr r4, [r2, #24]
+ str r7, [sp, #12] @ 4-byte Spill
+ adcs r7, r4, r5
+ ldr r4, [r1, #28]
+ ldr r5, [r2, #28]
+ str r7, [sp, #4] @ 4-byte Spill
+ adcs r6, r5, r4
+ ldr r5, [r2, #32]
+ ldr r4, [r1, #44]
+ ldr r1, [r2, #36]
+ ldr r2, [r2, #44]
+ str r6, [sp, #8] @ 4-byte Spill
+ adcs r9, r5, lr
+ ldr lr, [sp, #32] @ 4-byte Reload
+ adcs r5, r1, r8
+ ldr r1, [sp, #40] @ 4-byte Reload
+ ldr r8, [sp, #12] @ 4-byte Reload
+ adcs r11, r12, r11
+ ldr r12, [sp, #36] @ 4-byte Reload
+ str r5, [sp, #28] @ 4-byte Spill
+ adcs r2, r2, r4
+ ldr r4, [sp, #16] @ 4-byte Reload
+ str r2, [sp, #24] @ 4-byte Spill
+ str r1, [r0]
+ str r12, [r0, #4]
+ str lr, [r0, #8]
+ str r4, [r0, #12]
+ str r10, [r0, #16]
+ str r8, [r0, #20]
+ str r7, [r0, #24]
+ str r6, [r0, #28]
+ str r9, [r0, #32]
+ str r5, [r0, #36]
+ str r11, [r0, #40]
+ str r2, [r0, #44]
+ mov r2, #0
+ adc r2, r2, #0
+ str r2, [sp, #20] @ 4-byte Spill
+ ldm r3, {r2, r6, r7}
+ ldr r5, [r3, #12]
+ subs r1, r1, r2
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ sbcs r1, r12, r6
+ str r1, [sp] @ 4-byte Spill
+ sbcs r1, lr, r7
+ str r1, [sp, #36] @ 4-byte Spill
+ sbcs r1, r4, r5
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ sbcs r1, r10, r1
+ add r10, r3, #36
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ sbcs r6, r8, r1
+ ldr r1, [r3, #24]
+ sbcs lr, r2, r1
+ ldr r2, [r3, #28]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ sbcs r12, r1, r2
+ ldr r2, [r3, #32]
+ ldm r10, {r1, r4, r10}
+ sbcs r7, r9, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ sbcs r2, r2, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ sbcs r3, r11, r4
+ sbcs r5, r1, r10
+ ldr r1, [sp, #20] @ 4-byte Reload
+ sbc r1, r1, #0
+ tst r1, #1
+ bne .LBB184_2
+@ BB#1: @ %nocarry
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r1, [r0]
+ ldr r1, [sp] @ 4-byte Reload
+ str r1, [r0, #4]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r1, [r0, #8]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r1, [r0, #12]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r1, [r0, #16]
+ str r6, [r0, #20]
+ str lr, [r0, #24]
+ str r12, [r0, #28]
+ str r7, [r0, #32]
+ add r0, r0, #36
+ stm r0, {r2, r3, r5}
+.LBB184_2: @ %carry
+ add sp, sp, #44
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end184:
+ .size mcl_fp_add12L, .Lfunc_end184-mcl_fp_add12L
+ .cantunwind
+ .fnend
+
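+@ annotation (inferred from the symbol name and the code below; not part of the
+@ original generated output): "NF" modular addition variant; computes x+y, subtracts
+@ the modulus at [r3], and selects the in-range result with conditional moves on the
+@ sign of the difference.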
+ .globl mcl_fp_addNF12L
+ .align 2
+ .type mcl_fp_addNF12L,%function
+mcl_fp_addNF12L: @ @mcl_fp_addNF12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #60
+ sub sp, sp, #60
+ ldm r1, {r5, r8, lr}
+ ldr r6, [r2]
+ ldr r10, [r1, #12]
+ ldmib r2, {r4, r7, r9}
+ ldr r12, [r1, #20]
+ adds r6, r6, r5
+ ldr r5, [r1, #24]
+ adcs r8, r4, r8
+ ldr r4, [r2, #16]
+ str r6, [sp, #16] @ 4-byte Spill
+ adcs r7, r7, lr
+ add lr, r2, #32
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ adcs r6, r9, r10
+ str r6, [sp, #32] @ 4-byte Spill
+ ldr r6, [r1, #44]
+ adcs r7, r4, r7
+ ldr r4, [r1, #40]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ adcs r7, r7, r12
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ adcs r7, r7, r5
+ ldr r5, [r2, #28]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r1, #28]
+ adcs r7, r5, r7
+ ldr r5, [r1, #36]
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ ldm lr, {r1, r12, lr}
+ ldr r2, [r2, #44]
+ adcs r1, r1, r7
+ str r1, [sp, #20] @ 4-byte Spill
+ adcs r1, r12, r5
+ str r1, [sp, #28] @ 4-byte Spill
+ adcs r1, lr, r4
+ str r1, [sp, #36] @ 4-byte Spill
+ adc r1, r2, r6
+ str r1, [sp, #44] @ 4-byte Spill
+ ldmib r3, {r1, r2, r6, r11}
+ ldr r7, [r3, #20]
+ ldr r4, [r3, #32]
+ ldr r9, [r3]
+ ldr r5, [sp, #16] @ 4-byte Reload
+ ldr lr, [r3, #24]
+ ldr r10, [r3, #28]
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [r3, #36]
+ subs r9, r5, r9
+ sbcs r1, r8, r1
+ sbcs r2, r7, r2
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r4, [sp] @ 4-byte Spill
+ ldr r4, [r3, #40]
+ sbcs r12, r7, r6
+ ldr r7, [r3, #44]
+ ldr r3, [sp, #40] @ 4-byte Reload
+ str r4, [sp, #4] @ 4-byte Spill
+ ldr r4, [sp, #48] @ 4-byte Reload
+ ldr r6, [sp, #12] @ 4-byte Reload
+ sbcs r3, r3, r11
+ sbcs r11, r4, r6
+ ldr r4, [sp, #56] @ 4-byte Reload
+ ldr r6, [sp, #8] @ 4-byte Reload
+ sbcs lr, r4, lr
+ ldr r4, [sp, #52] @ 4-byte Reload
+ sbcs r10, r4, r10
+ ldr r4, [sp, #20] @ 4-byte Reload
+ sbcs r4, r4, r6
+ ldr r6, [sp] @ 4-byte Reload
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [sp, #28] @ 4-byte Reload
+ sbcs r4, r4, r6
+ ldr r6, [sp, #36] @ 4-byte Reload
+ str r4, [sp] @ 4-byte Spill
+ ldr r4, [sp, #4] @ 4-byte Reload
+ sbcs r6, r6, r4
+ str r6, [sp, #12] @ 4-byte Spill
+ ldr r6, [sp, #44] @ 4-byte Reload
+ sbc r6, r6, r7
+ asr r7, r6, #31
+ cmp r7, #0
+ movlt r9, r5
+ movlt r1, r8
+ str r9, [r0]
+ str r1, [r0, #4]
+ ldr r1, [sp, #24] @ 4-byte Reload
+ movlt r2, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ cmp r7, #0
+ str r2, [r0, #8]
+ ldr r2, [sp, #8] @ 4-byte Reload
+ movlt r12, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r12, [r0, #12]
+ movlt r3, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r3, [r0, #16]
+ ldr r3, [sp, #12] @ 4-byte Reload
+ movlt r11, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ cmp r7, #0
+ str r11, [r0, #20]
+ movlt lr, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str lr, [r0, #24]
+ movlt r10, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r10, [r0, #28]
+ movlt r2, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ cmp r7, #0
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #32]
+ movlt r7, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r7, [r0, #36]
+ movlt r3, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r3, [r0, #40]
+ movlt r6, r1
+ str r6, [r0, #44]
+ add sp, sp, #60
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end185:
+ .size mcl_fp_addNF12L, .Lfunc_end185-mcl_fp_addNF12L
+ .cantunwind
+ .fnend
+
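+@ annotation (inferred from the symbol name and the code below; not part of the
+@ original generated output): modular subtraction of 12-limb field elements; the raw
+@ difference is stored at [r0], and if the subtraction borrows, the modulus at [r3]
+@ is added back in the %carry path.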
+ .globl mcl_fp_sub12L
+ .align 2
+ .type mcl_fp_sub12L,%function
+mcl_fp_sub12L: @ @mcl_fp_sub12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #64
+ sub sp, sp, #64
+ ldr r9, [r2]
+ ldmib r2, {r8, r12, lr}
+ ldm r1, {r4, r5, r6, r7}
+ add r10, r1, #32
+ subs r4, r4, r9
+ sbcs r5, r5, r8
+ str r4, [sp, #48] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ sbcs r6, r6, r12
+ str r5, [sp, #52] @ 4-byte Spill
+ ldr r5, [r2, #20]
+ sbcs r7, r7, lr
+ str r6, [sp, #56] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ sbcs r11, r7, r6
+ ldr r6, [r1, #20]
+ str r11, [sp, #28] @ 4-byte Spill
+ sbcs lr, r6, r5
+ ldr r5, [r1, #24]
+ str lr, [sp, #40] @ 4-byte Spill
+ sbcs r7, r5, r4
+ ldr r4, [r2, #28]
+ ldr r5, [r1, #28]
+ str r7, [sp, #44] @ 4-byte Spill
+ add r7, r2, #32
+ sbcs r12, r5, r4
+ str r12, [sp, #36] @ 4-byte Spill
+ ldm r7, {r4, r5, r6, r7}
+ ldm r10, {r2, r8, r9, r10}
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r4, r2, r4
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [r0]
+ sbcs r8, r8, r5
+ str r4, [sp, #32] @ 4-byte Spill
+ sbcs r6, r9, r6
+ sbcs r7, r10, r7
+ ldr r10, [sp, #52] @ 4-byte Reload
+ str r10, [r0, #4]
+ str r2, [r0, #8]
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r2, [r0, #12]
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r11, [r0, #16]
+ str lr, [r0, #20]
+ str r2, [r0, #24]
+ str r12, [r0, #28]
+ str r4, [r0, #32]
+ mov r4, #0
+ str r8, [r0, #36]
+ str r6, [r0, #40]
+ str r7, [r0, #44]
+ sbc r4, r4, #0
+ tst r4, #1
+ beq .LBB186_2
+@ BB#1: @ %carry
+ ldr r5, [r3, #32]
+ ldr r4, [r3, #20]
+ ldr r12, [r3, #28]
+ ldr r9, [r3, #4]
+ ldr lr, [r3, #12]
+ ldr r11, [r3, #16]
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [r3, #36]
+ str r4, [sp] @ 4-byte Spill
+ ldr r4, [r3, #24]
+ str r12, [sp, #8] @ 4-byte Spill
+ str r5, [sp, #16] @ 4-byte Spill
+ ldr r5, [r3, #40]
+ str r4, [sp, #4] @ 4-byte Spill
+ str r5, [sp, #20] @ 4-byte Spill
+ ldr r5, [r3, #44]
+ str r5, [sp, #24] @ 4-byte Spill
+ ldr r5, [r3, #8]
+ ldr r3, [r3]
+ adds r3, r3, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r4, r9, r10
+ adcs r5, r5, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ stm r0, {r3, r4, r5}
+ ldr r3, [sp] @ 4-byte Reload
+ adcs r1, lr, r1
+ str r1, [r0, #12]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r1, r11, r1
+ str r1, [r0, #16]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r3, r1
+ ldr r3, [sp, #20] @ 4-byte Reload
+ str r1, [r0, #20]
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #28]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ add r0, r0, #32
+ adcs r1, r2, r1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ adcs r2, r2, r8
+ adcs r3, r3, r6
+ ldr r6, [sp, #24] @ 4-byte Reload
+ adc r7, r6, r7
+ stm r0, {r1, r2, r3, r7}
+.LBB186_2: @ %nocarry
+ add sp, sp, #64
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end186:
+ .size mcl_fp_sub12L, .Lfunc_end186-mcl_fp_sub12L
+ .cantunwind
+ .fnend
+
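+@ annotation (inferred from the symbol name and the code below; not part of the
+@ original generated output): "NF" modular subtraction variant; computes x-y, adds
+@ the modulus at [r3], and selects between the two results by the sign of the
+@ difference, using conditional moves.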
+ .globl mcl_fp_subNF12L
+ .align 2
+ .type mcl_fp_subNF12L,%function
+mcl_fp_subNF12L: @ @mcl_fp_subNF12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #56
+ sub sp, sp, #56
+ mov r12, r0
+ ldr r0, [r2, #32]
+ add r11, r2, #8
+ ldr r6, [r2]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r2, #36]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r2, #40]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r2, #44]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r2, #4]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r8, r10, r11}
+ ldr r0, [r2, #20]
+ ldr lr, [r1, #16]
+ ldr r7, [r1, #20]
+ ldr r5, [r1, #24]
+ ldr r4, [r1, #28]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r2, #24]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r2, #28]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #12]
+ ldm r1, {r1, r2, r9}
+ subs r1, r1, r6
+ ldr r6, [sp, #36] @ 4-byte Reload
+ sbcs r2, r2, r6
+ sbcs r6, r9, r8
+ mov r9, r2
+ sbcs r10, r0, r10
+ str r6, [sp, #4] @ 4-byte Spill
+ sbcs r0, lr, r11
+ add r11, r3, #8
+ ldr lr, [r3, #4]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ sbcs r0, r7, r0
+ ldr r7, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ sbcs r0, r5, r0
+ ldr r5, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ sbcs r0, r4, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ sbcs r0, r7, r0
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ sbcs r0, r7, r0
+ ldr r7, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ sbcs r0, r7, r0
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ sbc r0, r5, r7
+ ldr r7, [r3, #36]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [r3, #40]
+ str r0, [sp] @ 4-byte Spill
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r3, #44]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldm r11, {r7, r8, r11}
+ ldr r4, [r3, #28]
+ ldr r5, [r3, #20]
+ ldr r0, [r3, #24]
+ ldr r3, [r3]
+ str r4, [sp, #8] @ 4-byte Spill
+ mov r4, r1
+ adds r1, r4, r3
+ ldr r3, [sp, #36] @ 4-byte Reload
+ adcs r2, r9, lr
+ adcs lr, r6, r7
+ adcs r6, r10, r8
+ adcs r7, r3, r11
+ ldr r3, [sp, #40] @ 4-byte Reload
+ adcs r8, r3, r5
+ ldr r3, [sp, #44] @ 4-byte Reload
+ adcs r5, r3, r0
+ ldr r3, [sp, #48] @ 4-byte Reload
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r11, r3, r0
+ ldr r3, [sp, #52] @ 4-byte Reload
+ ldr r0, [sp] @ 4-byte Reload
+ adcs r3, r3, r0
+ ldr r0, [sp, #12] @ 4-byte Reload
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r3, r3, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r3
+ ldr r3, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r3, r0, r3
+ str r3, [sp, #20] @ 4-byte Spill
+ asr r3, r0, #31
+ ldr r0, [sp, #4] @ 4-byte Reload
+ cmp r3, #0
+ movge r1, r4
+ movge r2, r9
+ str r1, [r12]
+ str r2, [r12, #4]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r2, [sp, #32] @ 4-byte Reload
+ movge lr, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ cmp r3, #0
+ movge r6, r10
+ str lr, [r12, #8]
+ str r6, [r12, #12]
+ movge r7, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ str r7, [r12, #16]
+ ldr r7, [sp, #24] @ 4-byte Reload
+ movge r8, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ cmp r3, #0
+ str r8, [r12, #20]
+ movge r5, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ str r5, [r12, #24]
+ movge r11, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r11, [r12, #28]
+ movge r1, r0
+ cmp r3, #0
+ ldr r3, [sp, #28] @ 4-byte Reload
+ ldr r0, [sp, #12] @ 4-byte Reload
+ movge r0, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [r12, #32]
+ add r1, r12, #36
+ movge r2, r3
+ ldr r3, [sp, #20] @ 4-byte Reload
+ movge r3, r7
+ stm r1, {r0, r2, r3}
+ add sp, sp, #56
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end187:
+ .size mcl_fp_subNF12L, .Lfunc_end187-mcl_fp_subNF12L
+ .cantunwind
+ .fnend
+
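+@ annotation (inferred from the symbol name and the code below; not part of the
+@ original generated output): addition of double-width (24-limb, 768-bit) operands;
+@ the low 12 limbs are stored directly and the high half is conditionally reduced by
+@ the modulus at [r3].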
+ .globl mcl_fpDbl_add12L
+ .align 2
+ .type mcl_fpDbl_add12L,%function
+mcl_fpDbl_add12L: @ @mcl_fpDbl_add12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #136
+ sub sp, sp, #136
+ ldm r1, {r7, r8, r12, lr}
+ ldm r2, {r4, r5, r6, r9}
+ ldr r10, [r2, #20]
+ adds r4, r4, r7
+ str r4, [sp, #80] @ 4-byte Spill
+ ldr r4, [r2, #64]
+ str r4, [sp, #108] @ 4-byte Spill
+ ldr r4, [r2, #68]
+ str r4, [sp, #112] @ 4-byte Spill
+ ldr r4, [r2, #72]
+ str r4, [sp, #116] @ 4-byte Spill
+ ldr r4, [r2, #76]
+ str r4, [sp, #120] @ 4-byte Spill
+ ldr r4, [r2, #80]
+ str r4, [sp, #124] @ 4-byte Spill
+ ldr r4, [r2, #84]
+ str r4, [sp, #128] @ 4-byte Spill
+ ldr r4, [r2, #88]
+ str r4, [sp, #132] @ 4-byte Spill
+ ldr r4, [r2, #92]
+ str r4, [sp, #76] @ 4-byte Spill
+ adcs r4, r5, r8
+ adcs r7, r6, r12
+ ldr r6, [r2, #16]
+ str r4, [sp, #28] @ 4-byte Spill
+ str r7, [sp, #24] @ 4-byte Spill
+ adcs r7, r9, lr
+ add r9, r1, #32
+ add lr, r1, #16
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #100] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #104] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ ldr r2, [r1, #64]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r7, [sp, #16] @ 4-byte Spill
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldm r9, {r4, r5, r8, r9}
+ ldr r2, [r1, #48]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #80] @ 4-byte Reload
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r1, r6, r1
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #24] @ 4-byte Reload
+ ldr r6, [sp, #32] @ 4-byte Reload
+ adcs r2, r10, r2
+ ldr r10, [r3]
+ str r7, [r0, #8]
+ str r6, [r0, #12]
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #20] @ 4-byte Reload
+ ldr r7, [sp] @ 4-byte Reload
+ adcs r1, r1, r12
+ str r1, [r0, #24]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, lr
+ str r2, [r0, #28]
+ ldr r2, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [r0, #32]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r2, r2, r5
+ ldr r5, [r3, #12]
+ str r2, [r0, #36]
+ ldr r2, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [r0, #40]
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r2, r2, r9
+ str r2, [r0, #44]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ adcs r12, r1, r7
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r12, [sp, #80] @ 4-byte Spill
+ adcs r8, r1, r2
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r8, [sp, #88] @ 4-byte Spill
+ adcs lr, r1, r2
+ ldr r1, [sp, #104] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str lr, [sp, #92] @ 4-byte Spill
+ adcs r4, r1, r2
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r4, [sp, #104] @ 4-byte Spill
+ adcs r9, r1, r2
+ ldr r1, [sp, #112] @ 4-byte Reload
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r9, [sp, #96] @ 4-byte Spill
+ adcs r11, r1, r2
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r11, [sp, #108] @ 4-byte Spill
+ adcs r6, r1, r2
+ ldr r1, [sp, #120] @ 4-byte Reload
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r6, [sp, #112] @ 4-byte Spill
+ adcs r7, r1, r2
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r7, [sp, #116] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [sp, #128] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [sp, #132] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [r3, #8]
+ str r1, [sp, #132] @ 4-byte Spill
+ mov r1, #0
+ adc r1, r1, #0
+ subs r10, r12, r10
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [r3, #4]
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ sbcs r1, r8, r1
+ ldr r8, [r3, #40]
+ sbcs r2, lr, r2
+ ldr lr, [r3, #32]
+ sbcs r12, r4, r5
+ ldr r4, [r3, #36]
+ ldr r3, [r3, #44]
+ ldr r5, [sp, #72] @ 4-byte Reload
+ str r3, [sp, #64] @ 4-byte Spill
+ ldr r3, [sp, #68] @ 4-byte Reload
+ sbcs r3, r9, r3
+ sbcs r9, r11, r5
+ ldr r5, [sp, #76] @ 4-byte Reload
+ sbcs r5, r6, r5
+ ldr r6, [sp, #84] @ 4-byte Reload
+ sbcs r6, r7, r6
+ ldr r7, [sp, #124] @ 4-byte Reload
+ sbcs r11, r7, lr
+ ldr r7, [sp, #120] @ 4-byte Reload
+ sbcs lr, r7, r4
+ ldr r7, [sp, #128] @ 4-byte Reload
+ ldr r4, [sp, #64] @ 4-byte Reload
+ sbcs r8, r7, r8
+ ldr r7, [sp, #132] @ 4-byte Reload
+ sbcs r4, r7, r4
+ ldr r7, [sp, #100] @ 4-byte Reload
+ str r4, [sp, #84] @ 4-byte Spill
+ ldr r4, [sp, #80] @ 4-byte Reload
+ sbc r7, r7, #0
+ ands r7, r7, #1
+ movne r10, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ str r10, [r0, #48]
+ movne r1, r4
+ str r1, [r0, #52]
+ ldr r1, [sp, #92] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #104] @ 4-byte Reload
+ cmp r7, #0
+ str r2, [r0, #56]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ movne r12, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r12, [r0, #60]
+ movne r3, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r3, [r0, #64]
+ movne r9, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ cmp r7, #0
+ str r9, [r0, #68]
+ movne r5, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r5, [r0, #72]
+ movne r6, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r6, [r0, #76]
+ movne r11, r1
+ ldr r1, [sp, #120] @ 4-byte Reload
+ cmp r7, #0
+ str r11, [r0, #80]
+ movne lr, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str lr, [r0, #84]
+ movne r8, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r8, [r0, #88]
+ movne r2, r1
+ str r2, [r0, #92]
+ add sp, sp, #136
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end188:
+ .size mcl_fpDbl_add12L, .Lfunc_end188-mcl_fpDbl_add12L
+ .cantunwind
+ .fnend
+
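+@ annotation (inferred from the symbol name and the code below; not part of the
+@ original generated output): subtraction of double-width (24-limb) operands; when
+@ the high half borrows, the modulus at [r3] is added back to it before the result
+@ is stored.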
+ .globl mcl_fpDbl_sub12L
+ .align 2
+ .type mcl_fpDbl_sub12L,%function
+mcl_fpDbl_sub12L: @ @mcl_fpDbl_sub12L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #136
+ sub sp, sp, #136
+ ldr r7, [r2, #64]
+ str r7, [sp, #128] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #104] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #132] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #108] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #112] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #116] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #124] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #100] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2]
+ ldmib r2, {r6, r9}
+ ldr r5, [r1]
+ ldr r8, [r2, #12]
+ ldmib r1, {r4, lr}
+ ldr r12, [r1, #12]
+ ldr r10, [r2, #20]
+ subs r5, r5, r7
+ sbcs r4, r4, r6
+ str r5, [sp, #32] @ 4-byte Spill
+ ldr r5, [r2, #36]
+ ldr r6, [r2, #16]
+ sbcs r7, lr, r9
+ str r4, [sp, #24] @ 4-byte Spill
+ ldr r4, [r2, #32]
+ add r9, r1, #32
+ add lr, r1, #16
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r5, [sp, #44] @ 4-byte Spill
+ str r4, [sp, #40] @ 4-byte Spill
+ str r7, [sp, #36] @ 4-byte Spill
+ sbcs r7, r12, r8
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ ldr r2, [r1, #64]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r7, [sp, #28] @ 4-byte Spill
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #76] @ 4-byte Spill
+ ldm r9, {r4, r5, r8, r9}
+ ldr r2, [r1, #48]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #32] @ 4-byte Reload
+ ldr r7, [sp, #24] @ 4-byte Reload
+ sbcs r1, r1, r6
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ ldr r6, [sp, #8] @ 4-byte Reload
+ sbcs r2, r2, r10
+ str r7, [r0, #8]
+ str r6, [r0, #12]
+ str r1, [r0, #16]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp] @ 4-byte Reload
+ sbcs r1, r12, r1
+ str r1, [r0, #24]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r2, lr, r2
+ str r2, [r0, #28]
+ ldr r2, [sp, #44] @ 4-byte Reload
+ sbcs r1, r4, r1
+ str r1, [r0, #32]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbcs r2, r5, r2
+ str r2, [r0, #36]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ sbcs r1, r8, r1
+ str r1, [r0, #40]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ sbcs r2, r9, r2
+ str r2, [r0, #44]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ sbcs r9, r7, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #16] @ 4-byte Reload
+ str r9, [sp, #40] @ 4-byte Spill
+ sbcs lr, r2, r1
+ ldr r2, [sp, #96] @ 4-byte Reload
+ mov r1, #0
+ str lr, [sp, #44] @ 4-byte Spill
+ sbcs r2, r7, r2
+ ldr r7, [sp, #20] @ 4-byte Reload
+ str r2, [sp, #92] @ 4-byte Spill
+ ldr r2, [sp, #100] @ 4-byte Reload
+ sbcs r4, r7, r2
+ ldr r2, [sp, #128] @ 4-byte Reload
+ ldr r7, [sp, #48] @ 4-byte Reload
+ str r4, [sp, #88] @ 4-byte Spill
+ sbcs r2, r7, r2
+ ldr r7, [sp, #52] @ 4-byte Reload
+ str r2, [sp, #128] @ 4-byte Spill
+ ldr r2, [sp, #104] @ 4-byte Reload
+ sbcs r5, r7, r2
+ ldr r2, [sp, #132] @ 4-byte Reload
+ ldr r7, [sp, #56] @ 4-byte Reload
+ str r5, [sp, #96] @ 4-byte Spill
+ sbcs r2, r7, r2
+ ldr r7, [sp, #60] @ 4-byte Reload
+ str r2, [sp, #132] @ 4-byte Spill
+ ldr r2, [sp, #108] @ 4-byte Reload
+ sbcs r8, r7, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ ldr r7, [sp, #64] @ 4-byte Reload
+ str r8, [sp, #104] @ 4-byte Spill
+ sbcs r10, r7, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r10, [sp, #108] @ 4-byte Spill
+ sbcs r6, r7, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ ldr r7, [sp, #72] @ 4-byte Reload
+ str r6, [sp, #112] @ 4-byte Spill
+ sbcs r2, r7, r2
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r2, [sp, #124] @ 4-byte Spill
+ ldr r2, [sp, #120] @ 4-byte Reload
+ sbcs r2, r7, r2
+ sbc r1, r1, #0
+ str r2, [sp, #120] @ 4-byte Spill
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [r3, #32]
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #116] @ 4-byte Spill
+ ldmib r3, {r1, r2, r12}
+ ldr r7, [r3, #16]
+ ldr r11, [r3, #20]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r3, #24]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r3, #28]
+ ldr r3, [r3]
+ adds r3, r9, r3
+ ldr r9, [sp, #92] @ 4-byte Reload
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adcs r1, lr, r1
+ ldr lr, [sp, #128] @ 4-byte Reload
+ adcs r2, r9, r2
+ adcs r12, r4, r12
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs lr, lr, r4
+ adcs r4, r5, r11
+ ldr r5, [sp, #132] @ 4-byte Reload
+ ldr r11, [sp, #116] @ 4-byte Reload
+ adcs r5, r5, r7
+ ldr r7, [sp, #68] @ 4-byte Reload
+ adcs r8, r8, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adcs r10, r10, r7
+ ldr r7, [sp, #80] @ 4-byte Reload
+ adcs r6, r6, r7
+ ldr r7, [sp, #84] @ 4-byte Reload
+ str r6, [sp, #80] @ 4-byte Spill
+ ldr r6, [sp, #124] @ 4-byte Reload
+ adcs r6, r6, r7
+ ldr r7, [sp, #40] @ 4-byte Reload
+ str r6, [sp, #84] @ 4-byte Spill
+ ldr r6, [sp, #120] @ 4-byte Reload
+ adc r6, r6, r11
+ str r6, [sp, #116] @ 4-byte Spill
+ ldr r6, [sp, #100] @ 4-byte Reload
+ ands r6, r6, #1
+ moveq r3, r7
+ moveq r2, r9
+ str r3, [r0, #48]
+ ldr r3, [sp, #44] @ 4-byte Reload
+ moveq r1, r3
+ cmp r6, #0
+ str r1, [r0, #52]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r2, [r0, #56]
+ ldr r2, [sp, #80] @ 4-byte Reload
+ moveq r12, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r12, [r0, #60]
+ moveq lr, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str lr, [r0, #64]
+ moveq r4, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ cmp r6, #0
+ str r4, [r0, #68]
+ moveq r5, r1
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r5, [r0, #72]
+ moveq r8, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r8, [r0, #76]
+ moveq r10, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ cmp r6, #0
+ str r10, [r0, #80]
+ moveq r2, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r2, [r0, #84]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r2, [r0, #88]
+ ldr r2, [sp, #116] @ 4-byte Reload
+ moveq r2, r1
+ str r2, [r0, #92]
+ add sp, sp, #136
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end189:
+ .size mcl_fpDbl_sub12L, .Lfunc_end189-mcl_fpDbl_sub12L
+ .cantunwind
+ .fnend
+
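+@ annotation (inferred from the code below; not part of the original generated
+@ output): local helper that multiplies the 13-limb (416-bit) value at [r1] by the
+@ 32-bit scalar in r2 and writes the 14-limb product to [r0].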
+ .align 2
+ .type .LmulPv416x32,%function
+.LmulPv416x32: @ @mulPv416x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r3, [r1, #28]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #28]
+ ldr r3, [r1, #32]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #32]
+ ldr r3, [r1, #36]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #36]
+ ldr r3, [r1, #40]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #40]
+ ldr r3, [r1, #44]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #44]
+ ldr r1, [r1, #48]
+ umull r3, r7, r1, r2
+ adcs r1, r5, r3
+ str r1, [r0, #48]
+ adc r1, r7, #0
+ str r1, [r0, #52]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end190:
+ .size .LmulPv416x32, .Lfunc_end190-.LmulPv416x32
+ .cantunwind
+ .fnend
+
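+@ annotation (inferred from the symbol name and the code below; not part of the
+@ original generated output): multiplies the 13-limb value at [r1] by the 32-bit
+@ scalar in r2 via .LmulPv416x32 and copies the 14-limb result to [r0].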
+ .globl mcl_fp_mulUnitPre13L
+ .align 2
+ .type mcl_fp_mulUnitPre13L,%function
+mcl_fp_mulUnitPre13L: @ @mcl_fp_mulUnitPre13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #68
+ sub sp, sp, #68
+ mov r4, r0
+ add r0, sp, #8
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #60]
+ add r12, sp, #12
+ ldr lr, [sp, #56]
+ ldr r8, [sp, #52]
+ ldr r9, [sp, #48]
+ ldr r10, [sp, #44]
+ ldr r11, [sp, #40]
+ ldr r5, [sp, #36]
+ ldr r6, [sp, #32]
+ ldr r7, [sp, #28]
+ ldr r3, [sp, #8]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r12, {r0, r1, r2, r12}
+ str r3, [r4]
+ stmib r4, {r0, r1, r2, r12}
+ str r7, [r4, #20]
+ str r6, [r4, #24]
+ str r5, [r4, #28]
+ str r11, [r4, #32]
+ str r10, [r4, #36]
+ str r9, [r4, #40]
+ str r8, [r4, #44]
+ str lr, [r4, #48]
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r0, [r4, #52]
+ add sp, sp, #68
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end191:
+ .size mcl_fp_mulUnitPre13L, .Lfunc_end191-mcl_fp_mulUnitPre13L
+ .cantunwind
+ .fnend
+
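+@ annotation (inferred from the symbol name and the code below; not part of the
+@ original generated output): 13x13-limb schoolbook multiplication with no reduction,
+@ built from repeated .LmulPv416x32 calls; the 26-limb product is written to [r0].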
+ .globl mcl_fpDbl_mulPre13L
+ .align 2
+ .type mcl_fpDbl_mulPre13L,%function
+mcl_fpDbl_mulPre13L: @ @mcl_fpDbl_mulPre13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #820
+ sub sp, sp, #820
+ mov r7, r2
+ mov r4, r0
+ add r0, sp, #760
+ str r1, [sp, #84] @ 4-byte Spill
+ mov r5, r1
+ ldr r2, [r7]
+ str r7, [sp, #80] @ 4-byte Spill
+ str r4, [sp, #76] @ 4-byte Spill
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #812]
+ ldr r1, [sp, #764]
+ ldr r2, [r7, #4]
+ mov r6, r5
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #768]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #772]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r1, [sp, #20] @ 4-byte Spill
+ mov r1, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #784]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #780]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #776]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #760]
+ str r0, [r4]
+ add r0, sp, #704
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #756]
+ add r10, sp, #728
+ add lr, sp, #704
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #752]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #748]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #744]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #24] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #4]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #8]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #648
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #700]
+ add lr, sp, #676
+ add r9, sp, #656
+ ldr r11, [sp, #692]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm lr, {r5, r7, r12, lr}
+ ldr r8, [sp, #648]
+ ldr r10, [sp, #652]
+ ldm r9, {r0, r1, r2, r3, r9}
+ ldr r6, [sp, #24] @ 4-byte Reload
+ adds r6, r8, r6
+ str r6, [r4, #8]
+ mov r6, r4
+ ldr r4, [sp, #40] @ 4-byte Reload
+ adcs r4, r10, r4
+ str r4, [sp, #24] @ 4-byte Spill
+ ldr r4, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r2, [r5, #12]
+ adcs r0, r7, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #592
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #644]
+ add lr, sp, #612
+ add r7, sp, #600
+ ldr r8, [sp, #628]
+ ldr r11, [sp, #624]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #640]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #636]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #632]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r0, [sp, #592]
+ ldr r9, [sp, #596]
+ ldm r7, {r1, r2, r7}
+ ldr r10, [sp, #24] @ 4-byte Reload
+ adds r0, r0, r10
+ str r0, [r6, #12]
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r6, r9, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #16]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #536
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #588]
+ ldr r8, [sp, #536]
+ add r4, sp, #540
+ ldr r11, [sp, #580]
+ ldr r9, [sp, #576]
+ ldr lr, [sp, #572]
+ ldr r5, [sp, #568]
+ ldr r10, [sp, #564]
+ ldr r12, [sp, #560]
+ ldr r3, [sp, #556]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ adds r6, r8, r6
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r4, {r0, r1, r2, r4}
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r6, [r7, #16]
+ ldr r6, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r2, [r4, #20]
+ adcs r0, r3, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #480
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #532]
+ add r10, sp, #480
+ add r12, sp, #492
+ ldr r6, [sp, #516]
+ ldr r11, [sp, #512]
+ ldr lr, [sp, #508]
+ ldr r9, [sp, #504]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #528]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #524]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #520]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r0, r1, r10}
+ ldm r12, {r2, r3, r12}
+ ldr r8, [sp, #24] @ 4-byte Reload
+ adds r0, r0, r8
+ str r0, [r7, #20]
+ ldr r0, [sp, #44] @ 4-byte Reload
+ mov r7, r5
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r4, #24]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #424
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #476]
+ add r5, sp, #428
+ ldr r11, [sp, #464]
+ ldr r9, [sp, #460]
+ ldr lr, [sp, #456]
+ ldr r10, [sp, #452]
+ ldr r12, [sp, #448]
+ ldr r3, [sp, #444]
+ ldr r8, [sp, #424]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #472]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #468]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r5, {r0, r1, r2, r5}
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adds r6, r8, r4
+ ldr r4, [sp, #76] @ 4-byte Reload
+ str r6, [r4, #24]
+ ldr r6, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r2, [r5, #28]
+ adcs r0, r3, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #368
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #420]
+ add r12, sp, #388
+ add r10, sp, #368
+ ldr lr, [sp, #408]
+ ldr r6, [sp, #404]
+ ldr r11, [sp, #400]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r12, {r3, r9, r12}
+ ldr r7, [sp, #384]
+ ldm r10, {r0, r1, r10}
+ ldr r8, [sp, #24] @ 4-byte Reload
+ ldr r2, [sp, #380]
+ adds r0, r0, r8
+ str r0, [r4, #28]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r4, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #32]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #312
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #364]
+ add r11, sp, #344
+ add lr, sp, #316
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #360]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #356]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r7, r9, r11}
+ ldr r10, [sp, #340]
+ ldr r8, [sp, #312]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r5, [sp, #24] @ 4-byte Reload
+ adds r6, r8, r5
+ ldr r5, [sp, #76] @ 4-byte Reload
+ str r6, [r5, #32]
+ ldr r6, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r6, #36]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #256
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #308]
+ add lr, sp, #288
+ add r12, sp, #268
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #304]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #300]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm lr, {r7, r8, lr}
+ ldr r11, [sp, #284]
+ ldr r1, [sp, #256]
+ ldr r0, [sp, #260]
+ ldr r10, [sp, #264]
+ ldm r12, {r2, r3, r9, r12}
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adds r1, r1, r4
+ str r1, [r5, #36]
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r5, [sp, #84] @ 4-byte Reload
+ adcs r4, r0, r1
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r6, #40]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #200
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #252]
+ add r11, sp, #228
+ add lr, sp, #204
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #248]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #244]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r7, r8, r10, r11}
+ ldr r9, [sp, #200]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r6, r9, r4
+ ldr r4, [sp, #76] @ 4-byte Reload
+ str r6, [r4, #40]
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r6, #44]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #144
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #196]
+ add r11, sp, #164
+ add r12, sp, #152
+ ldr lr, [sp, #184]
+ ldr r7, [sp, #180]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r8, r10, r11}
+ ldr r2, [sp, #144]
+ ldr r1, [sp, #148]
+ ldm r12, {r0, r3, r12}
+ ldr r9, [sp, #24] @ 4-byte Reload
+ adds r2, r2, r9
+ str r2, [r4, #44]
+ ldr r2, [r6, #48]
+ ldr r6, [sp, #20] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r9, r0, r1
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ add r0, sp, #88
+ bl .LmulPv416x32(PLT)
+ add r3, sp, #88
+ add r11, sp, #104
+ ldm r3, {r0, r1, r2, r3}
+ adds r12, r0, r6
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs lr, r1, r9
+ adcs r5, r2, r0
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r6, r3, r0
+ ldr r0, [sp, #140]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldm r11, {r0, r1, r2, r3, r7, r8, r9, r10, r11}
+ str r12, [r4, #48]
+ str lr, [r4, #52]
+ str r5, [r4, #56]
+ ldr r5, [sp, #24] @ 4-byte Reload
+ str r6, [r4, #60]
+ ldr r6, [sp, #28] @ 4-byte Reload
+ add r12, r4, #80
+ adcs r0, r0, r5
+ adcs r1, r1, r6
+ str r0, [r4, #64]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r6, [sp, #84] @ 4-byte Reload
+ str r1, [r4, #68]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #72] @ 4-byte Reload
+ adcs r1, r3, r1
+ str r0, [r4, #72]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r3, [sp, #68] @ 4-byte Reload
+ str r1, [r4, #76]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adcs r1, r8, r1
+ adcs r2, r9, r2
+ adcs r3, r10, r3
+ adcs r7, r11, r7
+ adc r6, r6, #0
+ stm r12, {r0, r1, r2, r3, r7}
+ str r6, [r4, #100]
+ add sp, sp, #820
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end192:
+ .size mcl_fpDbl_mulPre13L, .Lfunc_end192-mcl_fpDbl_mulPre13L
+ .cantunwind
+ .fnend
+
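+@ mcl_fpDbl_sqrPre13L: squares a 13-limb (13 x 32-bit = 416-bit) operand into a
+@ 26-limb result (r0 = destination, r1 = source under AAPCS). Summary comment
+@ only, inferred from the generated code below: each of the 13 passes calls
+@ .LmulPv416x32 (416-bit by 32-bit multiply) with one source limb in r2, folds
+@ the partial product into stack-resident accumulators with adcs carry chains,
+@ and stores one completed low-order limb of the double-width result per pass.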
+ .globl mcl_fpDbl_sqrPre13L
+ .align 2
+ .type mcl_fpDbl_sqrPre13L,%function
+mcl_fpDbl_sqrPre13L: @ @mcl_fpDbl_sqrPre13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #820
+ sub sp, sp, #820
+ mov r5, r1
+ mov r4, r0
+ add r0, sp, #760
+ ldr r2, [r5]
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #812]
+ ldr r1, [sp, #764]
+ ldr r2, [r5, #4]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #768]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #772]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r1, [sp, #32] @ 4-byte Spill
+ mov r1, r5
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #784]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #780]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #776]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #760]
+ str r0, [r4]
+ add r0, sp, #704
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #756]
+ add r10, sp, #728
+ add lr, sp, #704
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #752]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #748]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #36] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #4]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #8]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #648
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #700]
+ add lr, sp, #680
+ add r11, sp, #656
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm lr, {r6, r12, lr}
+ ldr r8, [sp, #648]
+ ldr r10, [sp, #652]
+ ldm r11, {r0, r1, r2, r3, r9, r11}
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adds r7, r8, r7
+ str r7, [r4, #8]
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r7, r10, r7
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #12]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #592
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #644]
+ add r9, sp, #620
+ add lr, sp, #600
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #640]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #636]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r9, {r6, r7, r8, r9}
+ ldr r0, [sp, #592]
+ ldr r11, [sp, #596]
+ ldm lr, {r1, r2, r3, r12, lr}
+ ldr r10, [sp, #36] @ 4-byte Reload
+ adds r0, r0, r10
+ str r0, [r4, #12]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #16]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #536
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #588]
+ add r12, sp, #540
+ ldr r11, [sp, #576]
+ ldr lr, [sp, #572]
+ ldr r6, [sp, #568]
+ ldr r8, [sp, #536]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r12, {r0, r1, r2, r3, r9, r10, r12}
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adds r7, r8, r7
+ str r7, [r4, #16]
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r7, r0, r7
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #20]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #480
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #532]
+ add r10, sp, #512
+ add lr, sp, #484
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #528]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #524]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r6, r8, r10}
+ ldr r9, [sp, #480]
+ ldr r11, [sp, #508]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r9, r7
+ str r7, [r4, #20]
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #24]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #424
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #476]
+ add r8, sp, #456
+ add r12, sp, #432
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #472]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #468]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r8, {r6, r7, r8}
+ ldr lr, [sp, #452]
+ ldr r10, [sp, #448]
+ ldr r0, [sp, #424]
+ ldr r11, [sp, #428]
+ ldm r12, {r1, r2, r3, r12}
+ ldr r9, [sp, #36] @ 4-byte Reload
+ adds r0, r0, r9
+ str r0, [r4, #24]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #28]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #368
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #420]
+ add r11, sp, #400
+ add lr, sp, #372
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r6, r8, r11}
+ ldr r10, [sp, #368]
+ ldm lr, {r0, r1, r2, r3, r9, r12, lr}
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adds r7, r10, r7
+ str r7, [r4, #28]
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adcs r7, r0, r7
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #32]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #312
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #364]
+ add r10, sp, #344
+ add lr, sp, #316
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #360]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #356]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r6, r8, r10}
+ ldr r9, [sp, #312]
+ ldr r11, [sp, #340]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r9, r7
+ str r7, [r4, #32]
+ ldr r7, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #36]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #256
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #308]
+ add r8, sp, #288
+ add r12, sp, #264
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #304]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #300]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r8, {r6, r7, r8}
+ ldr lr, [sp, #284]
+ ldr r10, [sp, #280]
+ ldr r0, [sp, #256]
+ ldr r11, [sp, #260]
+ ldm r12, {r1, r2, r3, r12}
+ ldr r9, [sp, #36] @ 4-byte Reload
+ adds r0, r0, r9
+ str r0, [r4, #36]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #40]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #200
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #252]
+ add r10, sp, #228
+ add r12, sp, #200
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #248]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #244]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r10}
+ ldr lr, [sp, #224]
+ ldr r9, [sp, #220]
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r11, [sp, #32] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #40]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #44]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #144
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #196]
+ add r12, sp, #148
+ ldr r7, [sp, #180]
+ ldr r11, [sp, #176]
+ ldr r8, [sp, #172]
+ ldr lr, [sp, #168]
+ ldr r10, [sp, #164]
+ ldr r2, [sp, #144]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #184]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r12, {r0, r1, r3, r12}
+ ldr r6, [sp, #32] @ 4-byte Reload
+ adds r2, r2, r6
+ ldr r6, [sp, #84] @ 4-byte Reload
+ str r2, [r4, #44]
+ ldr r2, [r5, #48]
+ adcs r6, r0, r6
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r9, r1, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, sp, #88
+ bl .LmulPv416x32(PLT)
+ add r3, sp, #88
+ add r11, sp, #104
+ ldm r3, {r0, r1, r2, r3}
+ adds r12, r0, r6
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs lr, r1, r9
+ adcs r5, r2, r0
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r6, r3, r0
+ ldr r0, [sp, #140]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldm r11, {r0, r1, r2, r3, r7, r8, r9, r10, r11}
+ str r12, [r4, #48]
+ str lr, [r4, #52]
+ str r5, [r4, #56]
+ ldr r5, [sp, #32] @ 4-byte Reload
+ str r6, [r4, #60]
+ ldr r6, [sp, #36] @ 4-byte Reload
+ add r12, r4, #80
+ adcs r0, r0, r5
+ adcs r1, r1, r6
+ str r0, [r4, #64]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r6, [sp, #56] @ 4-byte Reload
+ str r1, [r4, #68]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #80] @ 4-byte Reload
+ adcs r1, r3, r1
+ str r0, [r4, #72]
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r3, [sp, #76] @ 4-byte Reload
+ str r1, [r4, #76]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adcs r1, r8, r1
+ adcs r2, r9, r2
+ adcs r3, r10, r3
+ adcs r7, r11, r7
+ adc r6, r6, #0
+ stm r12, {r0, r1, r2, r3, r7}
+ str r6, [r4, #100]
+ add sp, sp, #820
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end193:
+ .size mcl_fpDbl_sqrPre13L, .Lfunc_end193-mcl_fpDbl_sqrPre13L
+ .cantunwind
+ .fnend
+
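+@ mcl_fp_mont13L: Montgomery multiplication for 13-limb (416-bit) operands
+@ (r0 = result, r1 = x, r2 = y, r3 = modulus p under AAPCS), computing
+@ x*y*R^{-1} mod p with R = 2^416. Summary comment only, inferred from the
+@ generated code: the word at p[-1] (ldr r5, [r3, #-4]) appears to hold the
+@ Montgomery constant -p^{-1} mod 2^32; each of the 13 iterations multiplies x
+@ by one limb of y via .LmulPv416x32, forms the reduction factor from the low
+@ accumulator word times that constant, adds m*p with a second .LmulPv416x32
+@ call, and shifts the 14-word accumulator down one word via the adcs chains.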
+ .globl mcl_fp_mont13L
+ .align 2
+ .type mcl_fp_mont13L,%function
+mcl_fp_mont13L: @ @mcl_fp_mont13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #548
+ sub sp, sp, #548
+ .pad #1024
+ sub sp, sp, #1024
+ add r12, sp, #100
+ add r6, sp, #1024
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, r6, #488
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #96] @ 4-byte Spill
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1516]
+ ldr r7, [sp, #1512]
+ mov r1, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #1520]
+ mul r2, r7, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1524]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1564]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #1560]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #1556]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1552]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1548]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1544]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1540]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1536]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1532]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1528]
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #1456
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1508]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r5, [sp, #1480]
+ ldr r10, [sp, #1476]
+ ldr r11, [sp, #1472]
+ ldr r6, [sp, #1456]
+ ldr r9, [sp, #1460]
+ ldr r8, [sp, #1464]
+ ldr r4, [sp, #1468]
+ add lr, sp, #1024
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1504]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1500]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1496]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1492]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1488]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1484]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, lr, #376
+ bl .LmulPv416x32(PLT)
+ adds r0, r6, r7
+ ldr r1, [sp, #36] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ ldr r3, [sp, #1416]
+ ldr r12, [sp, #1420]
+ ldr lr, [sp, #1424]
+ ldr r6, [sp, #1432]
+ ldr r7, [sp, #1436]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #1444]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #1440]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #1428]
+ adcs r1, r11, r1
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ ldr r11, [sp, #72] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r10, r1
+ ldr r10, [sp, #1448]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r5, r1
+ ldr r5, [sp, #1400]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #1412]
+ adc r0, r0, #0
+ adds r11, r11, r5
+ ldr r5, [sp, #64] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #1408]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1452]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1404]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #1344
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1396]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #1368]
+ ldr r9, [sp, #1364]
+ ldr r10, [sp, #1360]
+ ldr r11, [sp, #1344]
+ ldr r6, [sp, #1348]
+ ldr r7, [sp, #1352]
+ ldr r4, [sp, #1356]
+ add lr, sp, #1024
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1392]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1388]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1384]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1380]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1376]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1372]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, lr, #264
+ bl .LmulPv416x32(PLT)
+ adds r0, r5, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r5, [sp, #1288]
+ ldr r2, [sp, #1300]
+ ldr r3, [sp, #1304]
+ ldr r12, [sp, #1308]
+ ldr lr, [sp, #1312]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1320]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1324]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1316]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1336]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1332]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1328]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1296]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r11, r11, r5
+ ldr r5, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1292]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #1232
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1284]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #1256]
+ ldr r9, [sp, #1252]
+ ldr r10, [sp, #1248]
+ ldr r11, [sp, #1232]
+ ldr r6, [sp, #1236]
+ ldr r7, [sp, #1240]
+ ldr r4, [sp, #1244]
+ add lr, sp, #1024
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1276]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1272]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1268]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1260]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, lr, #152
+ bl .LmulPv416x32(PLT)
+ adds r0, r5, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r5, [sp, #1176]
+ ldr r2, [sp, #1188]
+ ldr r3, [sp, #1192]
+ ldr r12, [sp, #1196]
+ ldr lr, [sp, #1200]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1208]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1212]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1204]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1224]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1220]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1216]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1184]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r11, r11, r5
+ ldr r5, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1228]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1180]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #1120
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1172]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #1144]
+ ldr r9, [sp, #1140]
+ ldr r10, [sp, #1136]
+ ldr r11, [sp, #1120]
+ ldr r6, [sp, #1124]
+ ldr r7, [sp, #1128]
+ ldr r4, [sp, #1132]
+ add lr, sp, #1024
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1168]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1164]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1160]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1156]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1152]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, lr, #40
+ bl .LmulPv416x32(PLT)
+ adds r0, r5, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r5, [sp, #1064]
+ ldr r2, [sp, #1076]
+ ldr r3, [sp, #1080]
+ ldr r12, [sp, #1084]
+ ldr lr, [sp, #1088]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1096]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1100]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1092]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1112]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1108]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1104]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1072]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r11, r11, r5
+ ldr r5, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1116]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1068]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #1008
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1060]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #1032]
+ ldr r9, [sp, #1028]
+ ldr r10, [sp, #1024]
+ ldr r11, [sp, #1008]
+ ldr r6, [sp, #1012]
+ ldr r7, [sp, #1016]
+ ldr r4, [sp, #1020]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #952
+ bl .LmulPv416x32(PLT)
+ adds r0, r5, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #956
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #980
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1004]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r6, r7, r8, r9, r10}
+ ldr r5, [sp, #952]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r11, r11, r5
+ ldr r5, [sp, #88] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #896
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #948]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #920]
+ ldr r9, [sp, #916]
+ ldr r10, [sp, #912]
+ ldr r11, [sp, #896]
+ ldr r6, [sp, #900]
+ ldr r7, [sp, #904]
+ ldr r4, [sp, #908]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #944]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #940]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #936]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #932]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #840
+ bl .LmulPv416x32(PLT)
+ adds r0, r5, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #844
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #868
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r6, r7, r8, r9, r10}
+ ldr r5, [sp, #840]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r11, r11, r5
+ ldr r5, [sp, #88] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #784
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #836]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #808]
+ ldr r9, [sp, #804]
+ ldr r10, [sp, #800]
+ ldr r11, [sp, #784]
+ ldr r6, [sp, #788]
+ ldr r7, [sp, #792]
+ ldr r4, [sp, #796]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #832]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #828]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #824]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #820]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #816]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #812]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #728
+ bl .LmulPv416x32(PLT)
+ adds r0, r5, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #732
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #756
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #780]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r6, r7, r8, r9, r10}
+ ldr r5, [sp, #728]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r11, r11, r5
+ ldr r5, [sp, #88] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #672
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #724]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #696]
+ ldr r9, [sp, #692]
+ ldr r10, [sp, #688]
+ ldr r11, [sp, #672]
+ ldr r6, [sp, #676]
+ ldr r7, [sp, #680]
+ ldr r4, [sp, #684]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #720]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #716]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #712]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #616
+ bl .LmulPv416x32(PLT)
+ adds r0, r5, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #620
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #644
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r6, r7, r8, r9, r10}
+ ldr r5, [sp, #616]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r11, r11, r5
+ ldr r5, [sp, #88] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #560
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #612]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #584]
+ ldr r9, [sp, #580]
+ ldr r10, [sp, #576]
+ ldr r11, [sp, #560]
+ ldr r6, [sp, #564]
+ ldr r7, [sp, #568]
+ ldr r4, [sp, #572]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #608]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #604]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #600]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #596]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #592]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #588]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #504
+ bl .LmulPv416x32(PLT)
+ adds r0, r5, r11
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #508
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #532
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #552]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r4, r6, r8, r9, r10}
+ ldr r5, [sp, #504]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #88] @ 4-byte Reload
+ adds r5, r11, r5
+ adcs r0, r7, r0
+ str r5, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ mul r2, r5, r8
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #448
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #500]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r5, [sp, #472]
+ ldr r9, [sp, #468]
+ ldr r10, [sp, #464]
+ ldr r11, [sp, #448]
+ ldr r6, [sp, #452]
+ ldr r7, [sp, #456]
+ ldr r4, [sp, #460]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #496]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #492]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #488]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #484]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #480]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #476]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #392
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #88] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ add lr, sp, #408
+ adds r0, r0, r11
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ add r6, sp, #392
+ adcs r11, r1, r7
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #432
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #36] @ 4-byte Spill
+ ldm r6, {r2, r5, r6}
+ ldr r4, [sp, #404]
+ adds r0, r0, r2
+ mul r1, r0, r8
+ adcs r5, r11, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ str r1, [sp, #28] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ str r5, [sp, #88] @ 4-byte Spill
+ ldr r5, [sp, #84] @ 4-byte Reload
+ adcs r5, r5, r6
+ str r5, [sp, #84] @ 4-byte Spill
+ ldr r5, [sp, #80] @ 4-byte Reload
+ adcs r4, r5, r4
+ str r4, [sp, #80] @ 4-byte Spill
+ ldr r4, [sp, #76] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #336
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #388]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r6, [sp, #364]
+ ldr r8, [sp, #360]
+ ldr r9, [sp, #356]
+ ldr r10, [sp, #352]
+ ldr r7, [sp, #336]
+ ldr r4, [sp, #340]
+ ldr r11, [sp, #344]
+ ldr r5, [sp, #348]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #384]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #380]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #376]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #372]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #280
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ add lr, sp, #296
+ adds r0, r0, r7
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #292]
+ adcs r11, r1, r11
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp, #288]
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #320
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #284]
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #280]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #28] @ 4-byte Spill
+ adds r1, r0, r2
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r1, [sp, #92] @ 4-byte Spill
+ mul r2, r1, r0
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ str r6, [sp, #40] @ 4-byte Spill
+ ldr r6, [sp, #88] @ 4-byte Reload
+ adcs r5, r6, r5
+ str r5, [sp, #36] @ 4-byte Spill
+ ldr r5, [sp, #84] @ 4-byte Reload
+ adcs r4, r5, r4
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [sp, #80] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #224
+ bl .LmulPv416x32(PLT)
+ ldr r1, [sp, #276]
+ add r11, sp, #224
+ ldr r4, [sp, #252]
+ ldr r8, [sp, #248]
+ ldr r9, [sp, #244]
+ ldr r10, [sp, #240]
+ add r0, sp, #168
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #272]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #268]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #264]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #260]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #256]
+ str r1, [sp, #8] @ 4-byte Spill
+ ldm r11, {r6, r7, r11}
+ ldr r1, [sp, #104] @ 4-byte Reload
+ ldr r5, [sp, #236]
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #36] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ add lr, sp, #184
+ adds r0, r0, r6
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r7
+ add r7, sp, #168
+ adcs r1, r1, r11
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #208
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #48] @ 4-byte Spill
+ ldm r7, {r2, r6, r7}
+ ldr r5, [sp, #180]
+ adds r4, r0, r2
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r1, r4, r0
+ ldr r0, [sp, #220]
+ str r1, [sp, #44] @ 4-byte Spill
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #104] @ 4-byte Reload
+ adcs r11, r11, r6
+ ldr r6, [sp, #100] @ 4-byte Reload
+ adcs r6, r6, r7
+ str r6, [sp, #36] @ 4-byte Spill
+ ldr r6, [sp, #92] @ 4-byte Reload
+ adcs r5, r6, r5
+ ldr r6, [sp, #88] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r8, r0, r8
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r9, r0, r9
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r6, r0, r1
+ mov r0, #0
+ mov r1, r10
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ add r0, sp, #112
+ bl .LmulPv416x32(PLT)
+ add r3, sp, #112
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r4, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r7, r11, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r7, [sp, #48] @ 4-byte Spill
+ adcs lr, r0, r2
+ ldr r0, [sp, #128]
+ adcs r12, r5, r3
+ str lr, [sp, #52] @ 4-byte Spill
+ str r12, [sp, #56] @ 4-byte Spill
+ adcs r4, r1, r0
+ ldr r0, [sp, #132]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r4, [sp, #60] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #136]
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #140]
+ adcs r0, r1, r0
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #144]
+ adcs r0, r1, r0
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #148]
+ adcs r0, r1, r0
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #152]
+ adcs r0, r8, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #156]
+ adcs r0, r9, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #160]
+ adcs r0, r1, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #164]
+ adcs r0, r6, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
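+@ NOTE: final conditional reduction: the modulus addressed through r10 is
+@ subtracted limb by limb from the accumulated result, and the movne chain
+@ below keeps the unsubtracted value when the subtraction borrows, storing
+@ t - p otherwise (standard Montgomery epilogue).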
+ mov r0, r10
+ ldmib r0, {r1, r2, r3, r5}
+ ldr r6, [r0]
+ ldr r10, [r0, #20]
+ ldr r11, [r0, #28]
+ str r5, [sp, #40] @ 4-byte Spill
+ ldr r5, [r0, #24]
+ subs r6, r7, r6
+ sbcs r9, lr, r1
+ str r5, [sp, #44] @ 4-byte Spill
+ mov r5, r0
+ sbcs r0, r12, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ ldr r1, [r5, #48]
+ sbcs r3, r4, r3
+ ldr lr, [r5, #32]
+ ldr r12, [r5, #36]
+ ldr r8, [r5, #40]
+ ldr r4, [r5, #44]
+ ldr r5, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ sbcs r1, r1, r2
+ ldr r2, [sp, #76] @ 4-byte Reload
+ sbcs r7, r2, r10
+ ldr r2, [sp, #80] @ 4-byte Reload
+ sbcs r2, r2, r5
+ ldr r5, [sp, #84] @ 4-byte Reload
+ sbcs r10, r5, r11
+ ldr r5, [sp, #88] @ 4-byte Reload
+ sbcs r11, r5, lr
+ ldr r5, [sp, #92] @ 4-byte Reload
+ sbcs r12, r5, r12
+ ldr r5, [sp, #96] @ 4-byte Reload
+ sbcs lr, r5, r8
+ ldr r5, [sp, #100] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #104] @ 4-byte Reload
+ str r4, [sp, #44] @ 4-byte Spill
+ ldr r4, [sp, #108] @ 4-byte Reload
+ sbcs r5, r5, r4
+ str r5, [sp, #108] @ 4-byte Spill
+ ldr r5, [sp, #64] @ 4-byte Reload
+ sbc r5, r5, #0
+ ands r8, r5, #1
+ ldr r5, [sp, #48] @ 4-byte Reload
+ movne r6, r5
+ ldr r5, [sp, #68] @ 4-byte Reload
+ str r6, [r5]
+ ldr r6, [sp, #52] @ 4-byte Reload
+ movne r9, r6
+ ldr r6, [sp, #56] @ 4-byte Reload
+ str r9, [r5, #4]
+ movne r0, r6
+ cmp r8, #0
+ str r0, [r5, #8]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ movne r3, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r3, [r5, #12]
+ movne r1, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r1, [r5, #16]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ movne r7, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ cmp r8, #0
+ str r7, [r5, #20]
+ movne r2, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r2, [r5, #24]
+ movne r10, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r10, [r5, #28]
+ movne r11, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ cmp r8, #0
+ str r11, [r5, #32]
+ movne r12, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r12, [r5, #36]
+ movne lr, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str lr, [r5, #40]
+ movne r1, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ cmp r8, #0
+ str r1, [r5, #44]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ movne r1, r0
+ str r1, [r5, #48]
+ add sp, sp, #548
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end194:
+ .size mcl_fp_mont13L, .Lfunc_end194-mcl_fp_mont13L
+ .cantunwind
+ .fnend
+
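+@ mcl_fp_montNF13L: Montgomery multiplication over 13 x 32-bit limbs
+@ (416 bits), implemented as repeated calls to .LmulPv416x32; structurally
+@ parallel to mcl_fp_mont13L above, using the "NF" variant of the reduction.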
+ .globl mcl_fp_montNF13L
+ .align 2
+ .type mcl_fp_montNF13L,%function
+mcl_fp_montNF13L: @ @mcl_fp_montNF13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #548
+ sub sp, sp, #548
+ .pad #1024
+ sub sp, sp, #1024
+ add r12, sp, #100
+ add r6, sp, #1024
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, r6, #488
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #96] @ 4-byte Spill
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1516]
+ ldr r8, [sp, #1512]
+ mov r1, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1520]
+ mul r2, r8, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #1524]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1564]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #1560]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #1556]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1552]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1548]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1544]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1540]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1536]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1532]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1528]
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #1456
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1508]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r10, [sp, #1480]
+ ldr r11, [sp, #1476]
+ ldr r6, [sp, #1472]
+ ldr r7, [sp, #1456]
+ ldr r9, [sp, #1460]
+ ldr r4, [sp, #1464]
+ ldr r5, [sp, #1468]
+ add lr, sp, #1024
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1504]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1500]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1496]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1492]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1488]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1484]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, lr, #376
+ bl .LmulPv416x32(PLT)
+ adds r0, r7, r8
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1412]
+ ldr r3, [sp, #1416]
+ ldr r12, [sp, #1420]
+ ldr lr, [sp, #1424]
+ ldr r7, [sp, #1436]
+ ldr r8, [sp, #1440]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #1444]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #1400]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #1428]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #1432]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r11, r0
+ ldr r11, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #1448]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adc r0, r1, r0
+ adds r11, r11, r4
+ ldr r4, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #1408]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1452]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1404]
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #1344
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1396]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #1368]
+ ldr r9, [sp, #1364]
+ ldr r10, [sp, #1360]
+ ldr r11, [sp, #1344]
+ ldr r6, [sp, #1348]
+ ldr r7, [sp, #1352]
+ ldr r5, [sp, #1356]
+ add lr, sp, #1024
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1392]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1388]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1384]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1380]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1376]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1372]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, lr, #264
+ bl .LmulPv416x32(PLT)
+ adds r0, r4, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add r11, sp, #1312
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldr r0, [sp, #1288]
+ ldr r7, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #1292]
+ ldr r2, [sp, #1296]
+ ldr r3, [sp, #1300]
+ ldr r12, [sp, #1304]
+ ldr lr, [sp, #1308]
+ adds r7, r7, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1232
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1284]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r9, [sp, #1256]
+ ldr r10, [sp, #1252]
+ ldr r11, [sp, #1248]
+ ldr r7, [sp, #1232]
+ ldr r5, [sp, #1236]
+ ldr r4, [sp, #1240]
+ ldr r6, [sp, #1244]
+ add lr, sp, #1024
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1276]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1272]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1268]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1260]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, lr, #152
+ bl .LmulPv416x32(PLT)
+ adds r0, r8, r7
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1188]
+ ldr r3, [sp, #1192]
+ ldr r12, [sp, #1196]
+ ldr lr, [sp, #1200]
+ ldr r7, [sp, #1212]
+ ldr r8, [sp, #1216]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1204]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1176]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1208]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1224]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1220]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r11, r11, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ ldr r1, [sp, #1184]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1228]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1180]
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #1120
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1172]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #1144]
+ ldr r9, [sp, #1140]
+ ldr r10, [sp, #1136]
+ ldr r11, [sp, #1120]
+ ldr r6, [sp, #1124]
+ ldr r7, [sp, #1128]
+ ldr r5, [sp, #1132]
+ add lr, sp, #1024
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1168]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1164]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1160]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1156]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1152]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, lr, #40
+ bl .LmulPv416x32(PLT)
+ adds r0, r4, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add r11, sp, #1088
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1116]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldr r0, [sp, #1064]
+ ldr r7, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #1068]
+ ldr r2, [sp, #1072]
+ ldr r3, [sp, #1076]
+ ldr r12, [sp, #1080]
+ ldr lr, [sp, #1084]
+ adds r7, r7, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1008
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #1060]
+ add r11, sp, #1016
+ ldr r9, [sp, #1032]
+ ldr r10, [sp, #1028]
+ ldr r7, [sp, #1008]
+ ldr r5, [sp, #1012]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r6, r11}
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #952
+ bl .LmulPv416x32(PLT)
+ adds r0, r8, r7
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #956
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #980
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1004]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldr r4, [sp, #952]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r11, r11, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #896
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #948]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #920]
+ ldr r9, [sp, #916]
+ ldr r10, [sp, #912]
+ ldr r11, [sp, #896]
+ ldr r6, [sp, #900]
+ ldr r7, [sp, #904]
+ ldr r5, [sp, #908]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #944]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #940]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #936]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #932]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #840
+ bl .LmulPv416x32(PLT)
+ adds r0, r4, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add r11, sp, #864
+ add lr, sp, #840
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #92] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #784
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #836]
+ add r11, sp, #792
+ ldr r9, [sp, #808]
+ ldr r10, [sp, #804]
+ ldr r7, [sp, #784]
+ ldr r5, [sp, #788]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #832]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #828]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #824]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #820]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #816]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #812]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r6, r11}
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #728
+ bl .LmulPv416x32(PLT)
+ adds r0, r8, r7
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #732
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #756
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #780]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldr r4, [sp, #728]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r11, r11, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #672
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #724]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r8, [sp, #696]
+ ldr r9, [sp, #692]
+ ldr r10, [sp, #688]
+ ldr r11, [sp, #672]
+ ldr r6, [sp, #676]
+ ldr r7, [sp, #680]
+ ldr r5, [sp, #684]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #720]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #716]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #712]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #616
+ bl .LmulPv416x32(PLT)
+ adds r0, r4, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add r11, sp, #640
+ add lr, sp, #616
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #92] @ 4-byte Reload
+ adds r7, r7, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #560
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #612]
+ add r11, sp, #568
+ ldr r9, [sp, #584]
+ ldr r10, [sp, #580]
+ ldr r7, [sp, #560]
+ ldr r5, [sp, #564]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #608]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #604]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #600]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #596]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #592]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #588]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r6, r11}
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #504
+ bl .LmulPv416x32(PLT)
+ adds r0, r8, r7
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #508
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #532
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldr r4, [sp, #504]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #92] @ 4-byte Reload
+ adds r11, r11, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r0, r4, r0
+ mov r4, r11
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ mul r2, r11, r8
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #448
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #500]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r9, [sp, #468]
+ ldr r10, [sp, #464]
+ ldr r11, [sp, #448]
+ ldr r6, [sp, #452]
+ ldr r7, [sp, #456]
+ ldr r5, [sp, #460]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #496]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #492]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #488]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #484]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #480]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #476]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #472]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #392
+ bl .LmulPv416x32(PLT)
+ adds r0, r4, r11
+ ldr r1, [sp, #88] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ add lr, sp, #408
+ ldr r4, [sp, #400]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #396]
+ adcs r1, r1, r7
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp, #404]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #432
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adc r1, r1, r2
+ ldr r2, [sp, #392]
+ str r1, [sp, #40] @ 4-byte Spill
+ adds r0, r0, r2
+ mul r1, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ str r1, [sp, #32] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #88] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #88] @ 4-byte Spill
+ ldr r6, [sp, #84] @ 4-byte Reload
+ adcs r4, r6, r4
+ str r4, [sp, #84] @ 4-byte Spill
+ ldr r4, [sp, #80] @ 4-byte Reload
+ adcs r4, r4, r5
+ str r4, [sp, #80] @ 4-byte Spill
+ ldr r4, [sp, #76] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #336
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #388]
+ add r9, sp, #344
+ ldr r6, [sp, #364]
+ ldr r7, [sp, #360]
+ ldr r8, [sp, #356]
+ ldr r10, [sp, #336]
+ ldr r11, [sp, #340]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #384]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #380]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #376]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #372]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r9, {r4, r5, r9}
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #280
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #84] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #296
+ adds r0, r0, r10
+ add r10, sp, #320
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ adcs r1, r1, r4
+ ldr r4, [sp, #288]
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp, #292]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #284]
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adc r1, r1, r2
+ ldr r2, [sp, #280]
+ str r1, [sp, #32] @ 4-byte Spill
+ adds r1, r0, r2
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r1, [sp, #92] @ 4-byte Spill
+ mul r2, r1, r0
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #88] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #44] @ 4-byte Spill
+ ldr r6, [sp, #84] @ 4-byte Reload
+ adcs r4, r6, r4
+ str r4, [sp, #40] @ 4-byte Spill
+ ldr r4, [sp, #80] @ 4-byte Reload
+ adcs r4, r4, r5
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [sp, #76] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ adc r0, r10, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, sp, #224
+ bl .LmulPv416x32(PLT)
+ ldr r1, [sp, #276]
+ add r9, sp, #232
+ ldr r6, [sp, #252]
+ ldr r7, [sp, #248]
+ ldr r8, [sp, #244]
+ ldr r10, [sp, #224]
+ ldr r11, [sp, #228]
+ add r0, sp, #168
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #272]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #268]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #264]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #260]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #256]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r9, {r4, r5, r9}
+ ldr r1, [sp, #104] @ 4-byte Reload
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #184
+ adds r0, r0, r10
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r11
+ adcs r1, r1, r4
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r7
+ add r7, sp, #168
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adc r1, r1, r2
+ str r1, [sp, #52] @ 4-byte Spill
+ ldm r7, {r2, r6, r7}
+ ldr r5, [sp, #180]
+ ldr r4, [sp, #216]
+ ldr r9, [sp, #212]
+ ldr r8, [sp, #208]
+ adds r10, r0, r2
+ ldr r0, [sp, #96] @ 4-byte Reload
+ mul r1, r10, r0
+ ldr r0, [sp, #220]
+ str r1, [sp, #48] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #104] @ 4-byte Reload
+ adcs r11, r11, r6
+ ldr r6, [sp, #100] @ 4-byte Reload
+ adcs r7, r6, r7
+ ldr r6, [sp, #92] @ 4-byte Reload
+ adcs r5, r6, r5
+ ldr r6, [sp, #88] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r8, r0, r8
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ mov r1, r4
+ adc r6, r0, #0
+ add r0, sp, #112
+ bl .LmulPv416x32(PLT)
+ add r3, sp, #112
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r10, r0
+ adcs r12, r11, r1
+ ldr r0, [sp, #128]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r2, r7, r2
+ str r12, [sp, #52] @ 4-byte Spill
+ adcs lr, r5, r3
+ str r2, [sp, #56] @ 4-byte Spill
+ str lr, [sp, #60] @ 4-byte Spill
+ adcs r9, r1, r0
+ ldr r0, [sp, #132]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r9, [sp, #64] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #136]
+ adcs r0, r1, r0
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #140]
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #144]
+ adcs r10, r1, r0
+ ldr r0, [sp, #148]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r10, [sp, #68] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #152]
+ adcs r0, r8, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #156]
+ adcs r0, r1, r0
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #160]
+ adcs r0, r1, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #164]
+ adc r0, r6, r0
+ mov r6, r4
+ str r0, [sp, #104] @ 4-byte Spill
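+@ Epilogue: subtract what appears to be the modulus (walked through r6) from the
+@ accumulated result; the sign of the final borrow word (asr #31 further down)
+@ selects either the raw sum or the subtracted value via the movlt chain.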
+ ldmib r6, {r0, r1, r7}
+ ldr r5, [r6, #24]
+ ldr r4, [r6, #28]
+ ldr r3, [r6, #16]
+ ldr r11, [r6, #20]
+ str r5, [sp, #48] @ 4-byte Spill
+ ldr r5, [r6]
+ str r4, [sp, #44] @ 4-byte Spill
+ subs r5, r12, r5
+ sbcs r8, r2, r0
+ sbcs r2, lr, r1
+ sbcs lr, r9, r7
+ add r7, r6, #32
+ ldm r7, {r0, r1, r7}
+ ldr r4, [r6, #44]
+ ldr r9, [r6, #48]
+ ldr r6, [sp, #76] @ 4-byte Reload
+ sbcs r3, r6, r3
+ ldr r6, [sp, #80] @ 4-byte Reload
+ str r4, [sp, #40] @ 4-byte Spill
+ ldr r4, [sp, #48] @ 4-byte Reload
+ sbcs r12, r6, r11
+ ldr r6, [sp, #84] @ 4-byte Reload
+ sbcs r11, r6, r4
+ ldr r4, [sp, #44] @ 4-byte Reload
+ sbcs r10, r10, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ sbcs r4, r4, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ sbcs r6, r0, r1
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r7, r0, r7
+ ldr r0, [sp, #100] @ 4-byte Reload
+ sbcs r0, r0, r1
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ sbc r9, r0, r9
+ ldr r0, [sp, #52] @ 4-byte Reload
+ asr r1, r9, #31
+ cmp r1, #0
+ movlt r5, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r5, [r0]
+ ldr r5, [sp, #56] @ 4-byte Reload
+ movlt r8, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ str r8, [r0, #4]
+ movlt r2, r5
+ cmp r1, #0
+ str r2, [r0, #8]
+ ldr r2, [sp, #64] @ 4-byte Reload
+ movlt lr, r2
+ ldr r2, [sp, #76] @ 4-byte Reload
+ str lr, [r0, #12]
+ movlt r3, r2
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r3, [r0, #16]
+ ldr r3, [sp, #108] @ 4-byte Reload
+ movlt r12, r2
+ ldr r2, [sp, #84] @ 4-byte Reload
+ cmp r1, #0
+ str r12, [r0, #20]
+ movlt r11, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r11, [r0, #24]
+ movlt r10, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str r10, [r0, #28]
+ movlt r4, r2
+ ldr r2, [sp, #92] @ 4-byte Reload
+ cmp r1, #0
+ str r4, [r0, #32]
+ movlt r6, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ str r6, [r0, #36]
+ movlt r7, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ str r7, [r0, #40]
+ movlt r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r3, [r0, #44]
+ movlt r9, r1
+ str r9, [r0, #48]
+ add sp, sp, #548
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end195:
+ .size mcl_fp_montNF13L, .Lfunc_end195-mcl_fp_montNF13L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed13L
+ .align 2
+ .type mcl_fp_montRed13L,%function
+mcl_fp_montRed13L: @ @mcl_fp_montRed13L
+ .fnstart
+@ BB#0:
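+@ Rough shape of the reduction this routine unrolls (a sketch; it assumes the
+@ word loaded below from [r3, #-4] is the precomputed constant -p^{-1} mod 2^32
+@ and that .LmulPv416x32 writes a 13x32-bit limb product as 14 stack words):
+@   for i = 0..12:
+@     m = t[0] * rp mod 2^32
+@     t = (t + m * p) >> 32       @ one .LmulPv416x32 call per round
+@   z = (t >= p) ? t - p : t      @ conditional subtraction at the end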
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #908
+ sub sp, sp, #908
+ mov r3, r2
+ str r0, [sp, #164] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r11, [r1]
+ ldr r0, [r3]
+ str r3, [sp, #168] @ 4-byte Spill
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #160] @ 4-byte Spill
+ ldr r0, [r3, #4]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #156] @ 4-byte Spill
+ ldr r0, [r3, #8]
+ str r2, [sp, #64] @ 4-byte Spill
+ str r0, [sp, #152] @ 4-byte Spill
+ ldr r0, [r3, #12]
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [r3, #20]
+ str r0, [sp, #144] @ 4-byte Spill
+ ldr r0, [r3, #24]
+ str r0, [sp, #148] @ 4-byte Spill
+ ldr r0, [r3, #-4]
+ str r0, [sp, #172] @ 4-byte Spill
+ mul r2, r11, r0
+ ldr r0, [r3, #28]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [r1, #96]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [r1, #100]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #72]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r1, #76]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #80]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #84]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r1, #88]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r1, #92]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1, #20]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ mov r1, r3
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #848
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #900]
+ add r10, sp, #872
+ add lr, sp, #848
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #168] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #172] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #792
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #844]
+ add lr, sp, #832
+ add r9, sp, #800
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r4, [sp, #792]
+ ldr r5, [sp, #828]
+ ldr r6, [sp, #824]
+ ldr r7, [sp, #820]
+ ldr r10, [sp, #816]
+ ldr r8, [sp, #812]
+ ldr r1, [sp, #796]
+ ldm r9, {r0, r2, r9}
+ adds r4, r11, r4
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adcs r11, r4, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #172] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #168] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #736
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #788]
+ add r10, sp, #760
+ add lr, sp, #736
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #784]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #780]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #680
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #732]
+ add lr, sp, #720
+ add r10, sp, #688
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r4, [sp, #680]
+ ldr r5, [sp, #716]
+ ldr r6, [sp, #712]
+ ldr r7, [sp, #708]
+ ldr r1, [sp, #684]
+ ldm r10, {r0, r2, r8, r9, r10}
+ adds r4, r11, r4
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adcs r11, r4, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #172] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #168] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #624
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #676]
+ add r10, sp, #648
+ add lr, sp, #624
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #672]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #568
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #620]
+ add lr, sp, #608
+ add r10, sp, #576
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r4, [sp, #568]
+ ldr r5, [sp, #604]
+ ldr r6, [sp, #600]
+ ldr r7, [sp, #596]
+ ldr r1, [sp, #572]
+ ldm r10, {r0, r2, r8, r9, r10}
+ adds r4, r11, r4
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adcs r11, r4, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #172] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #168] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #512
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #564]
+ add r10, sp, #536
+ add lr, sp, #512
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #560]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #456
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #508]
+ add lr, sp, #496
+ add r10, sp, #464
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r4, [sp, #456]
+ ldr r5, [sp, #492]
+ ldr r6, [sp, #488]
+ ldr r7, [sp, #484]
+ ldr r1, [sp, #460]
+ ldm r10, {r0, r2, r8, r9, r10}
+ adds r4, r11, r4
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adcs r11, r4, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r4, [sp, #172] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #168] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #400
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #452]
+ add r10, sp, #424
+ add lr, sp, #400
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #448]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #444]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #344
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #396]
+ add lr, sp, #384
+ add r10, sp, #352
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r4, [sp, #344]
+ ldr r5, [sp, #380]
+ ldr r6, [sp, #376]
+ ldr r7, [sp, #372]
+ ldr r1, [sp, #348]
+ ldm r10, {r0, r2, r8, r9, r10}
+ adds r4, r11, r4
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adcs r11, r4, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #168] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #172] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ mul r2, r11, r7
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r8
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #288
+ bl .LmulPv416x32(PLT)
+ ldr r0, [sp, #340]
+ add r10, sp, #312
+ add lr, sp, #288
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #336]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #332]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r4
+ mov r4, r7
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ mul r2, r11, r4
+ adcs r0, r0, r5
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r9
+ mov r9, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ add r0, sp, #232
+ bl .LmulPv416x32(PLT)
+ add r7, sp, #232
+ add lr, sp, #272
+ ldm r7, {r0, r1, r3, r7}
+ ldr r8, [sp, #284]
+ adds r0, r11, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r11, r0, r1
+ mul r0, r11, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #172] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r7
+ add r7, sp, #256
+ str r0, [sp, #60] @ 4-byte Spill
+ ldm lr, {r5, r12, lr}
+ ldr r6, [sp, #268]
+ ldm r7, {r1, r2, r7}
+ ldr r0, [sp, #248]
+ ldr r3, [sp, #108] @ 4-byte Reload
+ ldr r4, [sp, #252]
+ adcs r10, r3, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r4, r0, r4
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r8, r0, r8
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ add r0, sp, #176
+ bl .LmulPv416x32(PLT)
+ add r3, sp, #176
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r11, r0
+ ldr r0, [sp, #172] @ 4-byte Reload
+ adcs r12, r0, r1
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r12, [sp, #52] @ 4-byte Spill
+ adcs r2, r0, r2
+ ldr r0, [sp, #192]
+ adcs r3, r10, r3
+ str r2, [sp, #64] @ 4-byte Spill
+ str r3, [sp, #68] @ 4-byte Spill
+ adcs r7, r4, r0
+ ldr r0, [sp, #196]
+ str r7, [sp, #72] @ 4-byte Spill
+ adcs r4, r1, r0
+ ldr r0, [sp, #200]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r4, [sp, #76] @ 4-byte Spill
+ adcs r5, r1, r0
+ ldr r0, [sp, #204]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r5, [sp, #80] @ 4-byte Spill
+ adcs r6, r1, r0
+ ldr r0, [sp, #208]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r6, [sp, #84] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #172] @ 4-byte Spill
+ ldr r0, [sp, #212]
+ adcs r11, r1, r0
+ ldr r0, [sp, #216]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r11, [sp, #92] @ 4-byte Spill
+ adcs r10, r1, r0
+ ldr r0, [sp, #220]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r10, [sp, #100] @ 4-byte Spill
+ adcs r9, r1, r0
+ ldr r0, [sp, #224]
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r9, [sp, #108] @ 4-byte Spill
+ adcs r8, r8, r0
+ ldr r0, [sp, #228]
+ str r8, [sp, #168] @ 4-byte Spill
+ adcs lr, r1, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #156] @ 4-byte Reload
+ str lr, [sp, #104] @ 4-byte Spill
+ adc r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
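+@ Final step: subtract the modulus words (reloaded from the spill slots filled
+@ in the prologue) and use the borrow, masked to a single bit below, to pick
+@ between the reduced value and the unsubtracted one via the movne chain.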
+ ldr r0, [sp, #160] @ 4-byte Reload
+ subs r0, r12, r0
+ sbcs r1, r2, r1
+ ldr r2, [sp, #152] @ 4-byte Reload
+ sbcs r2, r3, r2
+ ldr r3, [sp, #136] @ 4-byte Reload
+ sbcs r3, r7, r3
+ ldr r7, [sp, #140] @ 4-byte Reload
+ sbcs r12, r4, r7
+ ldr r4, [sp, #144] @ 4-byte Reload
+ ldr r7, [sp, #172] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #148] @ 4-byte Reload
+ sbcs r5, r6, r5
+ ldr r6, [sp, #112] @ 4-byte Reload
+ sbcs r6, r7, r6
+ ldr r7, [sp, #116] @ 4-byte Reload
+ sbcs r7, r11, r7
+ str r7, [sp, #160] @ 4-byte Spill
+ ldr r7, [sp, #120] @ 4-byte Reload
+ sbcs r11, r10, r7
+ ldr r7, [sp, #124] @ 4-byte Reload
+ sbcs r9, r9, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ sbcs r10, r8, r7
+ ldr r7, [sp, #132] @ 4-byte Reload
+ sbcs r8, lr, r7
+ ldr r7, [sp, #96] @ 4-byte Reload
+ sbc r7, r7, #0
+ ands lr, r7, #1
+ ldr r7, [sp, #52] @ 4-byte Reload
+ movne r0, r7
+ ldr r7, [sp, #164] @ 4-byte Reload
+ str r0, [r7]
+ ldr r0, [sp, #64] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r1, [r7, #4]
+ ldr r1, [sp, #92] @ 4-byte Reload
+ movne r2, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ cmp lr, #0
+ str r2, [r7, #8]
+ movne r3, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r3, [r7, #12]
+ movne r12, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r12, [r7, #16]
+ movne r4, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ cmp lr, #0
+ str r4, [r7, #20]
+ movne r5, r0
+ ldr r0, [sp, #172] @ 4-byte Reload
+ str r5, [r7, #24]
+ movne r6, r0
+ ldr r0, [sp, #160] @ 4-byte Reload
+ movne r0, r1
+ str r6, [r7, #28]
+ cmp lr, #0
+ str r0, [r7, #32]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ movne r11, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ str r11, [r7, #36]
+ movne r9, r0
+ ldr r0, [sp, #168] @ 4-byte Reload
+ str r9, [r7, #40]
+ movne r10, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ cmp lr, #0
+ str r10, [r7, #44]
+ movne r8, r0
+ str r8, [r7, #48]
+ add sp, sp, #908
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end196:
+ .size mcl_fp_montRed13L, .Lfunc_end196-mcl_fp_montRed13L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre13L
+ .align 2
+ .type mcl_fp_addPre13L,%function
+mcl_fp_addPre13L: @ @mcl_fp_addPre13L
+ .fnstart
+@ BB#0:
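+@ z = x + y over thirteen 32-bit limbs with no modular reduction; the final
+@ carry is returned in r0 (the adc into the zeroed register at the end).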
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #36
+ sub sp, sp, #36
+ ldm r1, {r3, r12, lr}
+ ldr r9, [r1, #12]
+ ldmib r2, {r5, r6, r7}
+ ldr r11, [r2]
+ ldr r4, [r2, #16]
+ ldr r10, [r2, #32]
+ adds r8, r11, r3
+ ldr r3, [r2, #48]
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ ldr r11, [r1, #44]
+ adcs r5, r5, r12
+ add r12, r1, #16
+ adcs r6, r6, lr
+ ldr lr, [r1, #32]
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2, #44]
+ str r4, [sp, #12] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r4, [sp, #20] @ 4-byte Spill
+ ldr r4, [r2, #28]
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [r2, #36]
+ ldr r2, [r1, #36]
+ str r4, [sp, #24] @ 4-byte Spill
+ adcs r4, r7, r9
+ ldr r7, [r1, #40]
+ ldr r9, [r1, #48]
+ str r3, [sp, #4] @ 4-byte Spill
+ str r2, [sp] @ 4-byte Spill
+ ldm r12, {r1, r2, r3, r12}
+ str r8, [r0]
+ stmib r0, {r5, r6}
+ str r4, [r0, #12]
+ ldr r5, [sp, #8] @ 4-byte Reload
+ ldr r4, [sp, #12] @ 4-byte Reload
+ ldr r6, [sp, #32] @ 4-byte Reload
+ adcs r1, r5, r1
+ str r1, [r0, #16]
+ adcs r2, r4, r2
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r3
+ ldr r3, [sp] @ 4-byte Reload
+ adcs r2, r2, r12
+ str r1, [r0, #24]
+ add r12, r0, #32
+ str r2, [r0, #28]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ adcs r1, r10, lr
+ adcs r2, r2, r3
+ ldr r3, [sp, #16] @ 4-byte Reload
+ adcs r3, r3, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r7, r7, r11
+ adcs r6, r6, r9
+ stm r12, {r1, r2, r3, r7}
+ str r6, [r0, #48]
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #36
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end197:
+ .size mcl_fp_addPre13L, .Lfunc_end197-mcl_fp_addPre13L
+ .cantunwind
+ .fnend
+
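+@ mcl_fp_subPre13L: 13-limb subtraction without modular reduction.
+@ r0 = result pointer, r1 = minuend, r2 = subtrahend; the final borrow
+@ (0 or 1) is returned in r0.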
+ .globl mcl_fp_subPre13L
+ .align 2
+ .type mcl_fp_subPre13L,%function
+mcl_fp_subPre13L: @ @mcl_fp_subPre13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #36
+ sub sp, sp, #36
+ ldr r3, [r2, #16]
+ ldr r7, [r2]
+ ldr r6, [r1]
+ ldr r12, [r2, #4]
+ ldr r4, [r2, #8]
+ ldr r11, [r2, #12]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [r2, #20]
+ subs r7, r6, r7
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [r2, #24]
+ str r3, [sp, #24] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ str r3, [sp, #28] @ 4-byte Spill
+ ldmib r1, {r5, lr}
+ ldr r6, [r2, #48]
+ ldr r3, [r1, #12]
+ ldr r10, [r2, #32]
+ ldr r8, [r1, #44]
+ ldr r9, [r1, #48]
+ str r6, [sp, #32] @ 4-byte Spill
+ ldr r6, [r2, #44]
+ sbcs r5, r5, r12
+ add r12, r1, #16
+ sbcs r4, lr, r4
+ sbcs lr, r3, r11
+ ldr r3, [r2, #36]
+ ldr r11, [r1, #36]
+ str r6, [sp, #16] @ 4-byte Spill
+ ldr r6, [r2, #40]
+ ldr r2, [r1, #40]
+ str r3, [sp, #4] @ 4-byte Spill
+ str r6, [sp, #8] @ 4-byte Spill
+ ldr r6, [r1, #32]
+ str r2, [sp] @ 4-byte Spill
+ ldm r12, {r1, r2, r3, r12}
+ str r7, [r0]
+ str r5, [r0, #4]
+ str r4, [r0, #8]
+ ldr r4, [sp, #12] @ 4-byte Reload
+ ldr r7, [sp, #20] @ 4-byte Reload
+ str lr, [r0, #12]
+ sbcs r1, r1, r4
+ sbcs r2, r2, r7
+ str r1, [r0, #16]
+ ldr r1, [sp, #24] @ 4-byte Reload
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ sbcs r1, r3, r1
+ ldr r3, [sp, #8] @ 4-byte Reload
+ sbcs r2, r12, r2
+ str r1, [r0, #24]
+ add r12, r0, #32
+ str r2, [r0, #28]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ sbcs r1, r6, r10
+ ldr r6, [sp, #32] @ 4-byte Reload
+ sbcs r2, r11, r2
+ sbcs r3, r7, r3
+ ldr r7, [sp, #16] @ 4-byte Reload
+ sbcs r7, r8, r7
+ sbcs r6, r9, r6
+ stm r12, {r1, r2, r3, r7}
+ str r6, [r0, #48]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #36
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end198:
+ .size mcl_fp_subPre13L, .Lfunc_end198-mcl_fp_subPre13L
+ .cantunwind
+ .fnend
+
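+@ mcl_fp_shr1_13L: logical right shift of a 13-limb value by one bit,
+@ propagating each limb's low bit into the next lower limb (the lsrs/rrx
+@ and orr ... lsl #31 pairs below). r0 = destination, r1 = source.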
+ .globl mcl_fp_shr1_13L
+ .align 2
+ .type mcl_fp_shr1_13L,%function
+mcl_fp_shr1_13L: @ @mcl_fp_shr1_13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #8
+ sub sp, sp, #8
+ add r9, r1, #8
+ ldm r9, {r2, r3, r4, r5, r8, r9}
+ ldm r1, {r10, lr}
+ ldr r12, [r1, #36]
+ lsr r7, lr, #1
+ lsr r6, r3, #1
+ lsrs r3, r3, #1
+ orr r11, r7, r2, lsl #31
+ ldr r7, [r1, #48]
+ rrx r2, r2
+ lsrs r3, lr, #1
+ rrx r3, r10
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r7, [r1, #44]
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r1, #40]
+ ldr r1, [r1, #32]
+ stm r0, {r3, r11}
+ str r2, [r0, #8]
+ orr r2, r6, r4, lsl #31
+ str r2, [r0, #12]
+ lsrs r2, r5, #1
+ ldr r6, [sp] @ 4-byte Reload
+ rrx r2, r4
+ str r2, [r0, #16]
+ lsr r2, r5, #1
+ orr r2, r2, r8, lsl #31
+ str r2, [r0, #20]
+ lsrs r2, r9, #1
+ rrx r2, r8
+ str r2, [r0, #24]
+ lsr r2, r9, #1
+ orr r2, r2, r1, lsl #31
+ str r2, [r0, #28]
+ lsrs r2, r12, #1
+ lsr r2, r12, #1
+ rrx r1, r1
+ lsrs r3, r6, #1
+ add r12, r0, #32
+ orr r2, r2, r7, lsl #31
+ rrx r3, r7
+ lsr r7, r6, #1
+ ldr r6, [sp, #4] @ 4-byte Reload
+ orr r7, r7, r6, lsl #31
+ lsr r6, r6, #1
+ stm r12, {r1, r2, r3, r7}
+ str r6, [r0, #48]
+ add sp, sp, #8
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end199:
+ .size mcl_fp_shr1_13L, .Lfunc_end199-mcl_fp_shr1_13L
+ .cantunwind
+ .fnend
+
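+@ mcl_fp_add13L: modular addition of two 13-limb values. The raw sum is
+@ written to [r0] first; the modulus at [r3] is then subtracted, and the
+@ reduced value overwrites the output only when that subtraction produced
+@ no borrow (the %nocarry / %carry blocks around .LBB200_2).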
+ .globl mcl_fp_add13L
+ .align 2
+ .type mcl_fp_add13L,%function
+mcl_fp_add13L: @ @mcl_fp_add13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ ldr r9, [r1]
+ ldmib r1, {r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r7}
+ adds r11, r4, r9
+ ldr r9, [r1, #24]
+ adcs r4, r5, r8
+ ldr r5, [r1, #20]
+ adcs r6, r6, lr
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [r1, #16]
+ mov lr, r11
+ adcs r7, r7, r12
+ str r6, [sp, #28] @ 4-byte Spill
+ ldr r6, [r2, #32]
+ str lr, [r0]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #16]
+ adcs r8, r7, r4
+ ldr r4, [r2, #20]
+ adcs r7, r4, r5
+ ldr r5, [r2, #24]
+ ldr r4, [r1, #28]
+ str r7, [sp, #40] @ 4-byte Spill
+ adcs r7, r5, r9
+ ldr r5, [r2, #28]
+ str r7, [sp, #4] @ 4-byte Spill
+ ldr r11, [sp, #4] @ 4-byte Reload
+ adcs r7, r5, r4
+ ldr r5, [r1, #32]
+ ldr r4, [sp, #32] @ 4-byte Reload
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #20] @ 4-byte Reload
+ adcs r10, r6, r5
+ ldr r6, [r1, #36]
+ ldr r5, [r2, #36]
+ str r4, [r0, #4]
+ str r10, [sp, #24] @ 4-byte Spill
+ adcs r9, r5, r6
+ ldr r6, [r1, #40]
+ ldr r5, [r2, #40]
+ adcs r12, r5, r6
+ ldr r6, [r1, #44]
+ ldr r5, [r2, #44]
+ ldr r1, [r1, #48]
+ ldr r2, [r2, #48]
+ adcs r6, r5, r6
+ ldr r5, [sp, #28] @ 4-byte Reload
+ adcs r2, r2, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r6, [sp, #16] @ 4-byte Spill
+ str r2, [sp, #12] @ 4-byte Spill
+ str r5, [r0, #8]
+ str r7, [r0, #12]
+ str r8, [r0, #16]
+ str r1, [r0, #20]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r11, [r0, #24]
+ str r1, [r0, #28]
+ str r10, [r0, #32]
+ str r9, [r0, #36]
+ str r12, [r0, #40]
+ str r6, [r0, #44]
+ str r2, [r0, #48]
+ mov r2, #0
+ mov r10, r12
+ adc r1, r2, #0
+ str r1, [sp, #8] @ 4-byte Spill
+ ldm r3, {r2, r6}
+ ldr r1, [r3, #8]
+ ldr r12, [r3, #12]
+ subs r2, lr, r2
+ str r2, [sp] @ 4-byte Spill
+ sbcs r2, r4, r6
+ sbcs r1, r5, r1
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ sbcs r7, r7, r12
+ add r12, r3, #32
+ sbcs r8, r8, r1
+ ldr r1, [r3, #20]
+ sbcs r1, r2, r1
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ sbcs r1, r11, r1
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ sbcs r5, r2, r1
+ ldm r12, {r1, r2, r6, r11, r12}
+ ldr r3, [sp, #24] @ 4-byte Reload
+ sbcs r3, r3, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ sbcs r4, r9, r2
+ sbcs lr, r10, r6
+ ldr r6, [sp, #8] @ 4-byte Reload
+ sbcs r2, r1, r11
+ ldr r1, [sp, #12] @ 4-byte Reload
+ sbcs r1, r1, r12
+ sbc r6, r6, #0
+ tst r6, #1
+ bne .LBB200_2
+@ BB#1: @ %nocarry
+ mov r6, r7
+ ldr r7, [sp] @ 4-byte Reload
+ add r12, r0, #32
+ str r7, [r0]
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r7, [r0, #4]
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r7, [r0, #8]
+ ldr r7, [sp, #40] @ 4-byte Reload
+ str r6, [r0, #12]
+ str r8, [r0, #16]
+ str r7, [r0, #20]
+ ldr r7, [sp, #20] @ 4-byte Reload
+ str r7, [r0, #24]
+ str r5, [r0, #28]
+ stm r12, {r3, r4, lr}
+ str r2, [r0, #44]
+ str r1, [r0, #48]
+.LBB200_2: @ %carry
+ add sp, sp, #44
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end200:
+ .size mcl_fp_add13L, .Lfunc_end200-mcl_fp_add13L
+ .cantunwind
+ .fnend
+
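+@ mcl_fp_addNF13L: modular addition, "NF" variant. The sum is kept in
+@ registers, the modulus at [r3] is subtracted, and the sign of the final
+@ borrow word (asr #31) selects between the raw sum and the reduced sum
+@ with conditional moves before storing to [r0].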
+ .globl mcl_fp_addNF13L
+ .align 2
+ .type mcl_fp_addNF13L,%function
+mcl_fp_addNF13L: @ @mcl_fp_addNF13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #64
+ sub sp, sp, #64
+ ldm r1, {r7, r8, lr}
+ ldr r6, [r2]
+ ldr r12, [r1, #12]
+ ldmib r2, {r4, r5, r9}
+ adds r10, r6, r7
+ ldr r7, [r2, #16]
+ ldr r6, [r1, #24]
+ adcs r4, r4, r8
+ adcs lr, r5, lr
+ ldr r5, [r1, #16]
+ str r4, [sp, #28] @ 4-byte Spill
+ ldr r4, [r1, #20]
+ adcs r9, r9, r12
+ str lr, [sp, #8] @ 4-byte Spill
+ str r9, [sp, #12] @ 4-byte Spill
+ adcs r7, r7, r5
+ ldr r5, [r2, #20]
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r7, r5, r4
+ ldr r5, [r2, #24]
+ str r7, [sp, #36] @ 4-byte Spill
+ adcs r8, r5, r6
+ ldr r6, [r1, #28]
+ ldr r5, [r2, #28]
+ str r8, [sp, #16] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r6, [r1, #32]
+ ldr r5, [r2, #32]
+ str r7, [sp, #40] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r6, [r1, #36]
+ ldr r5, [r2, #36]
+ str r7, [sp, #44] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r6, [r1, #40]
+ ldr r5, [r2, #40]
+ str r7, [sp, #56] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r6, [r1, #44]
+ ldr r5, [r2, #44]
+ ldr r1, [r1, #48]
+ ldr r2, [r2, #48]
+ str r7, [sp, #52] @ 4-byte Spill
+ adcs r7, r5, r6
+ adc r1, r2, r1
+ str r7, [sp, #48] @ 4-byte Spill
+ str r1, [sp, #60] @ 4-byte Spill
+ ldmib r3, {r1, r12}
+ ldr r2, [r3, #24]
+ ldr r7, [r3]
+ ldr r6, [r3, #12]
+ ldr r5, [r3, #16]
+ ldr r4, [r3, #20]
+ ldr r11, [r3, #28]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [sp, #28] @ 4-byte Reload
+ subs r7, r10, r7
+ sbcs r2, r2, r1
+ ldr r1, [r3, #40]
+ sbcs r12, lr, r12
+ sbcs lr, r9, r6
+ ldr r9, [r3, #32]
+ ldr r6, [r3, #36]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [sp, #32] @ 4-byte Reload
+ sbcs r5, r1, r5
+ ldr r1, [sp, #36] @ 4-byte Reload
+ sbcs r3, r1, r4
+ ldr r1, [sp, #24] @ 4-byte Reload
+ sbcs r4, r8, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r8, r1, r11
+ ldr r1, [sp, #44] @ 4-byte Reload
+ sbcs r9, r1, r9
+ ldr r1, [sp, #56] @ 4-byte Reload
+ sbcs r11, r1, r6
+ ldr r1, [sp, #52] @ 4-byte Reload
+ ldr r6, [sp, #20] @ 4-byte Reload
+ sbcs r1, r1, r6
+ ldr r6, [sp] @ 4-byte Reload
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r1, r1, r6
+ ldr r6, [sp, #4] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ sbc r6, r1, r6
+ asr r1, r6, #31
+ cmp r1, #0
+ movlt r7, r10
+ str r7, [r0]
+ ldr r7, [sp, #28] @ 4-byte Reload
+ movlt r2, r7
+ str r2, [r0, #4]
+ ldr r2, [sp, #8] @ 4-byte Reload
+ movlt r12, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ cmp r1, #0
+ str r12, [r0, #8]
+ movlt lr, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str lr, [r0, #12]
+ movlt r5, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r5, [r0, #16]
+ movlt r3, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #20]
+ ldr r3, [sp, #20] @ 4-byte Reload
+ movlt r4, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r4, [r0, #24]
+ movlt r8, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r8, [r0, #28]
+ movlt r9, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ cmp r1, #0
+ str r9, [r0, #32]
+ movlt r11, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r11, [r0, #36]
+ movlt r3, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r3, [r0, #40]
+ ldr r3, [sp, #24] @ 4-byte Reload
+ movlt r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r3, [r0, #44]
+ movlt r6, r1
+ str r6, [r0, #48]
+ add sp, sp, #64
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end201:
+ .size mcl_fp_addNF13L, .Lfunc_end201-mcl_fp_addNF13L
+ .cantunwind
+ .fnend
+
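+@ mcl_fp_sub13L: modular subtraction of two 13-limb values. The raw
+@ difference is written to [r0]; if the subtraction borrowed, the modulus
+@ at [r3] is added back in the %carry block below.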
+ .globl mcl_fp_sub13L
+ .align 2
+ .type mcl_fp_sub13L,%function
+mcl_fp_sub13L: @ @mcl_fp_sub13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #56
+ sub sp, sp, #56
+ ldr r9, [r2]
+ ldmib r2, {r8, lr}
+ ldr r12, [r2, #12]
+ ldm r1, {r4, r5, r6, r7}
+ subs r11, r4, r9
+ ldr r4, [r2, #24]
+ sbcs r5, r5, r8
+ str r11, [sp, #28] @ 4-byte Spill
+ str r11, [r0]
+ sbcs r6, r6, lr
+ str r5, [sp, #52] @ 4-byte Spill
+ ldr r5, [r2, #20]
+ sbcs r7, r7, r12
+ str r6, [sp, #48] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ ldr r11, [sp, #44] @ 4-byte Reload
+ sbcs r10, r7, r6
+ ldr r7, [r1, #20]
+ str r10, [sp, #36] @ 4-byte Spill
+ sbcs r12, r7, r5
+ ldr r7, [r1, #24]
+ ldr r5, [r1, #28]
+ sbcs r8, r7, r4
+ ldr r7, [r2, #28]
+ ldr r4, [r1, #36]
+ str r8, [sp, #40] @ 4-byte Spill
+ sbcs r9, r5, r7
+ ldr r7, [r2, #32]
+ ldr r5, [r1, #32]
+ sbcs r5, r5, r7
+ ldr r7, [r2, #36]
+ sbcs r6, r4, r7
+ ldr r7, [r2, #40]
+ ldr r4, [r1, #40]
+ sbcs lr, r4, r7
+ ldr r7, [r2, #44]
+ ldr r4, [r1, #44]
+ ldr r2, [r2, #48]
+ ldr r1, [r1, #48]
+ sbcs r7, r4, r7
+ ldr r4, [sp, #52] @ 4-byte Reload
+ sbcs r2, r1, r2
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ str r2, [sp, #24] @ 4-byte Spill
+ str r4, [r0, #4]
+ str r1, [r0, #8]
+ str r11, [r0, #12]
+ str r10, [r0, #16]
+ str r12, [r0, #20]
+ str r8, [r0, #24]
+ str r9, [r0, #28]
+ str r5, [r0, #32]
+ str r6, [r0, #36]
+ str lr, [r0, #40]
+ str r7, [r0, #44]
+ str r2, [r0, #48]
+ mov r2, #0
+ sbc r2, r2, #0
+ tst r2, #1
+ beq .LBB202_2
+@ BB#1: @ %carry
+ ldr r2, [r3, #48]
+ ldr r7, [sp, #28] @ 4-byte Reload
+ ldr r10, [r3, #4]
+ ldr r8, [r3, #8]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r3, #12]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r3, #16]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r3, #20]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r3, #24]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r3, #28]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r3]
+ adds r2, r2, r7
+ ldr r7, [r3, #44]
+ adcs r4, r10, r4
+ ldr r10, [r3, #36]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r3, #40]
+ ldr r3, [r3, #32]
+ str r7, [sp, #52] @ 4-byte Spill
+ adcs r7, r8, r1
+ ldr r1, [sp] @ 4-byte Reload
+ stm r0, {r2, r4, r7}
+ ldr r2, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [r0, #12]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r2, r7, r2
+ str r2, [r0, #16]
+ adcs r2, r1, r12
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add r12, r0, #32
+ str r2, [r0, #20]
+ ldr r2, [sp, #40] @ 4-byte Reload
+ adcs r2, r1, r2
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r2, [r0, #24]
+ adcs r2, r1, r9
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r2, [r0, #28]
+ adcs r2, r3, r5
+ ldr r5, [sp, #20] @ 4-byte Reload
+ adcs r3, r10, r6
+ ldr r6, [sp, #28] @ 4-byte Reload
+ adcs r7, r1, lr
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r6, r6, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ stm r12, {r2, r3, r7}
+ str r6, [r0, #44]
+ adc r1, r5, r1
+ str r1, [r0, #48]
+.LBB202_2: @ %nocarry
+ add sp, sp, #56
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end202:
+ .size mcl_fp_sub13L, .Lfunc_end202-mcl_fp_sub13L
+ .cantunwind
+ .fnend
+
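+@ mcl_fp_subNF13L: modular subtraction, "NF" variant. x - y is computed,
+@ the modulus at [r3] is added to it, and the sign of the raw difference
+@ (asr #31 of its top limb) selects between the two results with
+@ conditional moves (movge) before storing through r12, the saved output
+@ pointer.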
+ .globl mcl_fp_subNF13L
+ .align 2
+ .type mcl_fp_subNF13L,%function
+mcl_fp_subNF13L: @ @mcl_fp_subNF13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #72
+ sub sp, sp, #72
+ mov r12, r0
+ ldr r0, [r2, #32]
+ add r9, r1, #20
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r2, #36]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r2, #40]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r2, #44]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r2, #48]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r2, {r7, r11}
+ ldr r0, [r2, #8]
+ ldr r10, [r2, #12]
+ ldr r8, [r2, #16]
+ ldr lr, [r1, #16]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r2, #20]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r2, #24]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r2, #28]
+ ldr r2, [r1, #8]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r1, #12]
+ ldm r9, {r4, r5, r9}
+ ldm r1, {r1, r6}
+ subs r7, r1, r7
+ ldr r1, [sp, #52] @ 4-byte Reload
+ sbcs r6, r6, r11
+ str r7, [sp] @ 4-byte Spill
+ str r6, [sp, #4] @ 4-byte Spill
+ sbcs r1, r2, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ sbcs r0, r0, r10
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ sbcs r0, lr, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ sbcs r0, r4, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ sbcs r0, r5, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r9, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ sbcs r11, r1, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r11, [sp, #20] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ sbc r0, r2, r1
+ ldr r1, [r3, #40]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldm r3, {r2, lr}
+ ldr r1, [r3, #20]
+ ldr r5, [r3, #8]
+ ldr r10, [sp, #8] @ 4-byte Reload
+ ldr r4, [r3, #12]
+ ldr r8, [r3, #24]
+ ldr r9, [r3, #28]
+ adds r2, r7, r2
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ adcs r3, r6, lr
+ ldr r6, [sp, #12] @ 4-byte Reload
+ adcs lr, r10, r5
+ ldr r5, [sp, #48] @ 4-byte Reload
+ adcs r4, r5, r4
+ ldr r5, [sp, #52] @ 4-byte Reload
+ adcs r5, r5, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r7, r1, r8
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r8, r1, r9
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r9, r11, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r11, r1, r0
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r1, r0, r1
+ str r1, [sp, #32] @ 4-byte Spill
+ asr r1, r0, #31
+ ldr r0, [sp] @ 4-byte Reload
+ cmp r1, #0
+ movge lr, r10
+ movge r2, r0
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r2, [r12]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ movge r3, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r12, #4]
+ str lr, [r12, #8]
+ movge r4, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r4, [r12, #12]
+ movge r5, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r5, [r12, #16]
+ movge r6, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ cmp r1, #0
+ str r6, [r12, #20]
+ movge r7, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r7, [r12, #24]
+ movge r8, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ str r8, [r12, #28]
+ movge r9, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ cmp r1, #0
+ str r9, [r12, #32]
+ movge r11, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r11, [r12, #36]
+ movge r2, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ str r2, [r12, #40]
+ ldr r2, [sp, #36] @ 4-byte Reload
+ movge r0, r2
+ cmp r1, #0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [r12, #44]
+ ldr r0, [sp, #32] @ 4-byte Reload
+ movge r0, r1
+ str r0, [r12, #48]
+ add sp, sp, #72
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end203:
+ .size mcl_fp_subNF13L, .Lfunc_end203-mcl_fp_subNF13L
+ .cantunwind
+ .fnend
+
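+@ mcl_fpDbl_add13L: addition of two 26-limb (double-width) values. The low
+@ 13 limbs of the sum are stored directly to [r0]; the high 13 limbs are
+@ conditionally reduced by the modulus at [r3] before being stored at
+@ offsets #52..#100.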
+ .globl mcl_fpDbl_add13L
+ .align 2
+ .type mcl_fpDbl_add13L,%function
+mcl_fpDbl_add13L: @ @mcl_fpDbl_add13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #152
+ sub sp, sp, #152
+ ldm r1, {r7, r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r9}
+ add r10, r1, #32
+ adds r4, r4, r7
+ str r4, [sp, #84] @ 4-byte Spill
+ ldr r4, [r2, #96]
+ str r4, [sp, #144] @ 4-byte Spill
+ ldr r4, [r2, #100]
+ str r4, [sp, #148] @ 4-byte Spill
+ adcs r4, r5, r8
+ ldr r8, [r2, #16]
+ adcs r7, r6, lr
+ str r4, [sp, #72] @ 4-byte Spill
+ add lr, r1, #16
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #112] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #116] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #124] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #128] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #132] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #140] @ 4-byte Spill
+ adcs r7, r9, r12
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #100] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r1, #96]
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r9, r10}
+ ldr r2, [r1, #52]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #84] @ 4-byte Reload
+ ldr r7, [sp, #72] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #68] @ 4-byte Reload
+ adcs r1, r8, r1
+ str r7, [r0, #8]
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r2, r7, r2
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r1, r1, r12
+ str r1, [r0, #24]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, lr
+ str r2, [r0, #28]
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [r0, #32]
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r2, r2, r5
+ str r2, [r0, #36]
+ ldr r2, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [r0, #40]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r2, r2, r9
+ str r2, [r0, #44]
+ ldr r2, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r10
+ str r1, [r0, #48]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r6, r2, r7
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r6, [sp, #88] @ 4-byte Spill
+ adcs r5, r1, r2
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r5, [sp, #92] @ 4-byte Spill
+ adcs r4, r1, r2
+ ldr r1, [sp, #112] @ 4-byte Reload
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r4, [sp, #96] @ 4-byte Spill
+ adcs r7, r1, r2
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r7, [sp, #112] @ 4-byte Spill
+ adcs lr, r1, r2
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str lr, [sp, #100] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [sp, #120] @ 4-byte Reload
+ adcs r8, r1, r2
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r8, [sp, #116] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [sp, #132] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [sp, #136] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #136] @ 4-byte Spill
+ ldr r1, [sp, #140] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r1, [sp, #140] @ 4-byte Spill
+ ldr r1, [sp, #144] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #144] @ 4-byte Spill
+ ldr r1, [sp, #148] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #148] @ 4-byte Spill
+ mov r1, #0
+ adc r1, r1, #0
+ str r1, [sp, #108] @ 4-byte Spill
+ ldmib r3, {r2, r9, r12}
+ ldr r1, [r3, #20]
+ ldr r11, [r3]
+ ldr r10, [r3, #16]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ subs r11, r6, r11
+ sbcs r2, r5, r2
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ str r1, [sp, #120] @ 4-byte Spill
+ sbcs r1, r4, r9
+ add r9, r3, #32
+ sbcs r12, r7, r12
+ ldm r9, {r5, r7, r9}
+ ldr r4, [r3, #44]
+ ldr r3, [r3, #48]
+ ldr r6, [sp, #84] @ 4-byte Reload
+ sbcs r10, lr, r10
+ str r3, [sp, #80] @ 4-byte Spill
+ ldr r3, [sp, #124] @ 4-byte Reload
+ str r4, [sp, #76] @ 4-byte Spill
+ sbcs lr, r3, r6
+ ldr r3, [sp, #104] @ 4-byte Reload
+ ldr r6, [sp, #120] @ 4-byte Reload
+ sbcs r4, r8, r3
+ ldr r3, [sp, #128] @ 4-byte Reload
+ sbcs r6, r3, r6
+ ldr r3, [sp, #132] @ 4-byte Reload
+ sbcs r5, r3, r5
+ ldr r3, [sp, #136] @ 4-byte Reload
+ sbcs r8, r3, r7
+ ldr r3, [sp, #140] @ 4-byte Reload
+ ldr r7, [sp, #76] @ 4-byte Reload
+ sbcs r9, r3, r9
+ ldr r3, [sp, #144] @ 4-byte Reload
+ sbcs r3, r3, r7
+ ldr r7, [sp, #80] @ 4-byte Reload
+ str r3, [sp, #120] @ 4-byte Spill
+ ldr r3, [sp, #148] @ 4-byte Reload
+ sbcs r3, r3, r7
+ ldr r7, [sp, #88] @ 4-byte Reload
+ str r3, [sp, #104] @ 4-byte Spill
+ ldr r3, [sp, #108] @ 4-byte Reload
+ sbc r3, r3, #0
+ ands r3, r3, #1
+ movne r11, r7
+ ldr r7, [sp, #92] @ 4-byte Reload
+ str r11, [r0, #52]
+ movne r2, r7
+ str r2, [r0, #56]
+ ldr r2, [sp, #96] @ 4-byte Reload
+ movne r1, r2
+ cmp r3, #0
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str r1, [r0, #60]
+ ldr r1, [sp, #112] @ 4-byte Reload
+ movne r12, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r12, [r0, #64]
+ movne r10, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r10, [r0, #68]
+ movne lr, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ cmp r3, #0
+ str lr, [r0, #72]
+ movne r4, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r4, [r0, #76]
+ movne r6, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r6, [r0, #80]
+ movne r5, r1
+ ldr r1, [sp, #136] @ 4-byte Reload
+ cmp r3, #0
+ str r5, [r0, #84]
+ movne r8, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r8, [r0, #88]
+ movne r9, r1
+ ldr r1, [sp, #144] @ 4-byte Reload
+ str r9, [r0, #92]
+ movne r2, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ cmp r3, #0
+ ldr r3, [sp, #104] @ 4-byte Reload
+ str r2, [r0, #96]
+ movne r3, r1
+ str r3, [r0, #100]
+ add sp, sp, #152
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end204:
+ .size mcl_fpDbl_add13L, .Lfunc_end204-mcl_fpDbl_add13L
+ .cantunwind
+ .fnend
+
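+@ mcl_fpDbl_sub13L: subtraction of two 26-limb (double-width) values. The
+@ low 13 limbs of the difference go straight to [r0]; the modulus at [r3]
+@ is added to the high 13 limbs, and that corrected value is kept only if
+@ the subtraction borrowed (the moveq selects on the borrow flag word).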
+ .globl mcl_fpDbl_sub13L
+ .align 2
+ .type mcl_fpDbl_sub13L,%function
+mcl_fpDbl_sub13L: @ @mcl_fpDbl_sub13L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #152
+ sub sp, sp, #152
+ ldr r7, [r2, #96]
+ add r10, r1, #32
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r2, #100]
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #124] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #112] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #140] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #132] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #128] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #116] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #108] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #104] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #100] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldm r2, {r9, lr}
+ ldr r6, [r1]
+ ldr r5, [r1, #4]
+ ldr r12, [r2, #8]
+ ldr r4, [r1, #8]
+ ldr r8, [r2, #12]
+ ldr r7, [r1, #12]
+ subs r6, r6, r9
+ str r6, [sp, #32] @ 4-byte Spill
+ ldr r6, [r2, #40]
+ str r6, [sp, #80] @ 4-byte Spill
+ sbcs r6, r5, lr
+ add lr, r1, #16
+ str r6, [sp, #28] @ 4-byte Spill
+ ldr r6, [r2, #36]
+ str r6, [sp, #48] @ 4-byte Spill
+ sbcs r6, r4, r12
+ sbcs r7, r7, r8
+ str r6, [sp, #20] @ 4-byte Spill
+ ldr r6, [r2, #32]
+ ldr r8, [r2, #16]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r6, [sp, #40] @ 4-byte Spill
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r1, #96]
+ str r2, [sp, #84] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #88] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #76] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r9, r10}
+ ldr r2, [r1, #52]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #32] @ 4-byte Reload
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #20] @ 4-byte Reload
+ sbcs r1, r1, r8
+ str r7, [r0, #8]
+ ldr r7, [sp, #16] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #24] @ 4-byte Reload
+ sbcs r2, r2, r7
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #36] @ 4-byte Reload
+ sbcs r1, r12, r1
+ str r1, [r0, #24]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r2, lr, r2
+ str r2, [r0, #28]
+ ldr r2, [sp, #48] @ 4-byte Reload
+ sbcs r1, r4, r1
+ str r1, [r0, #32]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbcs r2, r5, r2
+ str r2, [r0, #36]
+ ldr r2, [sp, #92] @ 4-byte Reload
+ sbcs r1, r6, r1
+ str r1, [r0, #40]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ sbcs r2, r9, r2
+ str r2, [r0, #44]
+ ldr r2, [sp, #100] @ 4-byte Reload
+ sbcs r1, r10, r1
+ add r10, r3, #16
+ str r1, [r0, #48]
+ ldr r1, [sp, #104] @ 4-byte Reload
+ sbcs r9, r7, r2
+ ldr r2, [sp, #4] @ 4-byte Reload
+ ldr r7, [sp, #52] @ 4-byte Reload
+ sbcs r11, r2, r1
+ ldr r1, [sp, #120] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #124] @ 4-byte Reload
+ str r1, [sp, #120] @ 4-byte Spill
+ mov r1, #0
+ sbcs r6, r7, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r6, [sp, #92] @ 4-byte Spill
+ sbcs r2, r7, r2
+ ldr r7, [sp, #56] @ 4-byte Reload
+ str r2, [sp, #124] @ 4-byte Spill
+ ldr r2, [sp, #112] @ 4-byte Reload
+ sbcs r8, r7, r2
+ ldr r2, [sp, #140] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ str r8, [sp, #96] @ 4-byte Spill
+ sbcs r2, r7, r2
+ ldr r7, [sp, #64] @ 4-byte Reload
+ str r2, [sp, #140] @ 4-byte Spill
+ ldr r2, [sp, #132] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r2, [sp, #132] @ 4-byte Spill
+ ldr r2, [sp, #128] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #72] @ 4-byte Reload
+ str r2, [sp, #128] @ 4-byte Spill
+ ldr r2, [sp, #116] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [sp, #136] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #84] @ 4-byte Reload
+ str r2, [sp, #136] @ 4-byte Spill
+ ldr r2, [sp, #144] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #88] @ 4-byte Reload
+ str r2, [sp, #144] @ 4-byte Spill
+ ldr r2, [sp, #148] @ 4-byte Reload
+ sbcs r2, r7, r2
+ mov r7, r9
+ mov r9, r11
+ sbc r1, r1, #0
+ str r2, [sp, #148] @ 4-byte Spill
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [r3, #32]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #112] @ 4-byte Spill
+ ldm r3, {r1, r2, r12, lr}
+ ldm r10, {r3, r4, r5, r10}
+ ldr r11, [sp, #120] @ 4-byte Reload
+ adds r1, r7, r1
+ adcs r2, r9, r2
+ adcs r12, r11, r12
+ ldr r11, [sp, #112] @ 4-byte Reload
+ adcs lr, r6, lr
+ ldr r6, [sp, #124] @ 4-byte Reload
+ adcs r3, r6, r3
+ ldr r6, [sp, #140] @ 4-byte Reload
+ adcs r4, r8, r4
+ adcs r8, r6, r5
+ ldr r5, [sp, #132] @ 4-byte Reload
+ ldr r6, [sp, #84] @ 4-byte Reload
+ adcs r10, r5, r10
+ ldr r5, [sp, #128] @ 4-byte Reload
+ adcs r5, r5, r6
+ ldr r6, [sp, #88] @ 4-byte Reload
+ str r5, [sp, #84] @ 4-byte Spill
+ ldr r5, [sp, #116] @ 4-byte Reload
+ adcs r5, r5, r6
+ ldr r6, [sp, #104] @ 4-byte Reload
+ str r5, [sp, #88] @ 4-byte Spill
+ ldr r5, [sp, #136] @ 4-byte Reload
+ adcs r5, r5, r6
+ ldr r6, [sp, #108] @ 4-byte Reload
+ str r5, [sp, #104] @ 4-byte Spill
+ ldr r5, [sp, #144] @ 4-byte Reload
+ adcs r5, r5, r6
+ str r5, [sp, #108] @ 4-byte Spill
+ ldr r5, [sp, #148] @ 4-byte Reload
+ adc r5, r5, r11
+ str r5, [sp, #112] @ 4-byte Spill
+ ldr r5, [sp, #100] @ 4-byte Reload
+ ands r5, r5, #1
+ moveq r1, r7
+ moveq r2, r9
+ str r1, [r0, #52]
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r2, [r0, #56]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ moveq r12, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ cmp r5, #0
+ str r12, [r0, #60]
+ moveq lr, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str lr, [r0, #64]
+ moveq r3, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r3, [r0, #68]
+ ldr r3, [sp, #112] @ 4-byte Reload
+ moveq r4, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ cmp r5, #0
+ str r4, [r0, #72]
+ moveq r8, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r8, [r0, #76]
+ moveq r10, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r10, [r0, #80]
+ moveq r2, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ cmp r5, #0
+ str r2, [r0, #84]
+ ldr r2, [sp, #88] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #136] @ 4-byte Reload
+ str r2, [r0, #88]
+ ldr r2, [sp, #104] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #144] @ 4-byte Reload
+ str r2, [r0, #92]
+ ldr r2, [sp, #108] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ cmp r5, #0
+ str r2, [r0, #96]
+ moveq r3, r1
+ str r3, [r0, #100]
+ add sp, sp, #152
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end205:
+ .size mcl_fpDbl_sub13L, .Lfunc_end205-mcl_fpDbl_sub13L
+ .cantunwind
+ .fnend
+
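+@ .LmulPv448x32: local helper multiplying a 14-limb (448-bit) operand at
+@ [r1] by the 32-bit scalar in r2, writing the 15-limb product to [r0]
+@ (umull/umlal chain with adcs carry propagation). Rough C equivalent
+@ (illustrative only, not from the source):
+@   uint64_t c = 0;
+@   for (int i = 0; i < 14; i++) {
+@     c += (uint64_t)x[i] * y;
+@     z[i] = (uint32_t)c; c >>= 32;
+@   }
+@   z[14] = (uint32_t)c;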
+ .align 2
+ .type .LmulPv448x32,%function
+.LmulPv448x32: @ @mulPv448x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r3, [r1, #28]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #28]
+ ldr r3, [r1, #32]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #32]
+ ldr r3, [r1, #36]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #36]
+ ldr r3, [r1, #40]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #40]
+ ldr r3, [r1, #44]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #44]
+ ldr r3, [r1, #48]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #48]
+ ldr r1, [r1, #52]
+ umull r3, r7, r1, r2
+ adcs r1, r6, r3
+ str r1, [r0, #52]
+ adc r1, r7, #0
+ str r1, [r0, #56]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end206:
+ .size .LmulPv448x32, .Lfunc_end206-.LmulPv448x32
+ .cantunwind
+ .fnend
+
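+@ mcl_fp_mulUnitPre14L: multiply a 14-limb value by a 32-bit unit via
+@ .LmulPv448x32 into a stack buffer, then copy the 15-limb result out
+@ through r4 (the saved output pointer).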
+ .globl mcl_fp_mulUnitPre14L
+ .align 2
+ .type mcl_fp_mulUnitPre14L,%function
+mcl_fp_mulUnitPre14L: @ @mcl_fp_mulUnitPre14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #76
+ sub sp, sp, #76
+ mov r4, r0
+ add r0, sp, #8
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #64]
+ add lr, sp, #8
+ ldr r8, [sp, #56]
+ ldr r9, [sp, #52]
+ ldr r10, [sp, #48]
+ ldr r11, [sp, #44]
+ ldr r5, [sp, #40]
+ ldr r6, [sp, #36]
+ ldr r7, [sp, #32]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #60]
+ str r0, [sp] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ stm r4, {r0, r1, r2, r3, r12, lr}
+ str r7, [r4, #24]
+ str r6, [r4, #28]
+ str r5, [r4, #32]
+ str r11, [r4, #36]
+ str r10, [r4, #40]
+ str r9, [r4, #44]
+ str r8, [r4, #48]
+ ldr r0, [sp] @ 4-byte Reload
+ str r0, [r4, #52]
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r0, [r4, #56]
+ add sp, sp, #76
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end207:
+ .size mcl_fp_mulUnitPre14L, .Lfunc_end207-mcl_fp_mulUnitPre14L
+ .cantunwind
+ .fnend
+
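+@ mcl_fpDbl_mulPre14L: 14x14-limb full multiplication built from three
+@ 7x7-limb products (one Karatsuba level): low halves, high halves, and
+@ the product of the two half-sums, recombined with the carry/borrow
+@ fix-ups below. Rough structure (illustrative only, not from the source):
+@   mcl_fpDbl_mulPre7L(z,      aL, bL);        // low  x low
+@   mcl_fpDbl_mulPre7L(z + 14, aH, bH);        // high x high  (+56 bytes)
+@   mcl_fpDbl_mulPre7L(t, aL + aH, bL + bH);   // middle term
+@   z[7..21] += t - aL*bL - aH*bH;             // plus half-sum carry fixes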
+ .globl mcl_fpDbl_mulPre14L
+ .align 2
+ .type mcl_fpDbl_mulPre14L,%function
+mcl_fpDbl_mulPre14L: @ @mcl_fpDbl_mulPre14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #228
+ sub sp, sp, #228
+ mov r6, r2
+ mov r5, r1
+ mov r4, r0
+ bl mcl_fpDbl_mulPre7L(PLT)
+ add r0, r4, #56
+ add r1, r5, #28
+ add r2, r6, #28
+ bl mcl_fpDbl_mulPre7L(PLT)
+ ldr r0, [r6, #32]
+ add r11, r6, #36
+ str r0, [sp, #104] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [r6, #52]
+ ldr r12, [r6]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldmib r6, {r1, r2, r3, r7}
+ ldr r0, [r6, #28]
+ ldr lr, [r6, #24]
+ ldr r6, [r6, #20]
+ adds r0, r12, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ adcs r0, r2, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ adcs r0, r3, r9
+ str r0, [sp, #96] @ 4-byte Spill
+ adcs r0, r7, r10
+ str r0, [sp, #92] @ 4-byte Spill
+ adcs r0, r6, r11
+ add r11, r5, #32
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, lr, r0
+ add lr, r5, #12
+ str r0, [sp, #84] @ 4-byte Spill
+ mov r0, #0
+ ldm r11, {r8, r10, r11}
+ ldr r7, [r5]
+ ldr r3, [r5, #4]
+ ldr r2, [r5, #8]
+ adc r6, r0, #0
+ ldr r0, [r5, #44]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r5, #48]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r5, #52]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r5, #28]
+ ldm lr, {r1, r9, r12, lr}
+ adds r0, r7, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ str r0, [sp, #144]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r7, r3, r8
+ adcs r10, r2, r10
+ add r2, sp, #116
+ str r7, [sp, #148]
+ adcs r11, r1, r11
+ add r1, sp, #144
+ str r10, [sp, #152]
+ str r11, [sp, #156]
+ adcs r5, r9, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r5, [sp, #160]
+ adcs r9, r12, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r9, [sp, #164]
+ adcs r8, lr, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ str r8, [sp, #168]
+ str r0, [sp, #116]
+ ldr r0, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #120]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #124]
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #128]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #132]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #136]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #140]
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ add r0, sp, #172
+ bl mcl_fpDbl_mulPre7L(PLT)
+ ldr r0, [sp, #108] @ 4-byte Reload
+ cmp r6, #0
+ ldr r2, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #100] @ 4-byte Reload
+ moveq r8, r6
+ moveq r9, r6
+ moveq r5, r6
+ moveq r11, r6
+ moveq r10, r6
+ cmp r6, #0
+ moveq r2, r6
+ moveq r7, r6
+ str r2, [sp, #112] @ 4-byte Spill
+ str r7, [sp, #76] @ 4-byte Spill
+ adds r3, r2, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r2, [sp, #92] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #88] @ 4-byte Reload
+ adcs lr, r10, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r11, r1
+ adcs r2, r5, r2
+ adcs r12, r9, r7
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adcs r7, r8, r7
+ str r7, [sp, #104] @ 4-byte Spill
+ mov r7, #0
+ adc r7, r7, #0
+ str r7, [sp, #108] @ 4-byte Spill
+ ldr r7, [sp, #80] @ 4-byte Reload
+ cmp r7, #0
+ moveq r2, r5
+ ldr r5, [sp, #76] @ 4-byte Reload
+ moveq r1, r11
+ moveq lr, r10
+ ldr r11, [sp, #104] @ 4-byte Reload
+ moveq r0, r5
+ ldr r5, [sp, #112] @ 4-byte Reload
+ moveq r3, r5
+ cmp r7, #0
+ ldr r5, [sp, #108] @ 4-byte Reload
+ moveq r5, r7
+ and r7, r6, r7
+ ldr r6, [sp, #200]
+ moveq r12, r9
+ moveq r11, r8
+ adds r10, r3, r6
+ ldr r3, [sp, #204]
+ adcs r8, r0, r3
+ ldr r0, [sp, #208]
+ add r3, sp, #172
+ adcs r9, lr, r0
+ ldr r0, [sp, #212]
+ ldr lr, [r4]
+ adcs r0, r1, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #216]
+ adcs r0, r2, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #220]
+ adcs r0, r12, r0
+ ldr r12, [r4, #4]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #224]
+ adcs r0, r11, r0
+ ldr r11, [r4, #12]
+ str r0, [sp, #92] @ 4-byte Spill
+ adc r0, r5, r7
+ ldr r5, [r4, #8]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldm r3, {r0, r1, r2, r3}
+ subs lr, r0, lr
+ sbcs r12, r1, r12
+ ldr r1, [sp, #188]
+ sbcs r5, r2, r5
+ ldr r2, [r4, #36]
+ sbcs r0, r3, r11
+ ldr r3, [sp, #104] @ 4-byte Reload
+ ldr r11, [r4, #60]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r4, #16]
+ str r2, [sp, #112] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #192]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r4, #20]
+ sbcs r0, r1, r0
+ ldr r1, [sp, #196]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r4, #24]
+ sbcs r6, r1, r0
+ ldr r0, [r4, #28]
+ sbcs r7, r10, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r4, #32]
+ ldr r10, [r4, #56]
+ sbcs r8, r8, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ sbcs r9, r9, r2
+ ldr r2, [r4, #40]
+ sbcs r0, r3, r2
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r4, #44]
+ ldr r3, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ sbcs r0, r3, r2
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r4, #48]
+ ldr r3, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ sbcs r0, r3, r2
+ str r2, [sp, #100] @ 4-byte Spill
+ ldr r2, [r4, #52]
+ ldr r3, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ sbcs r0, r3, r2
+ str r2, [sp, #96] @ 4-byte Spill
+ ldr r2, [sp, #88] @ 4-byte Reload
+ ldr r3, [r4, #68]
+ str r0, [sp, #56] @ 4-byte Spill
+ sbc r0, r2, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ subs r0, lr, r10
+ ldr lr, [r4, #76]
+ str r0, [sp, #48] @ 4-byte Spill
+ sbcs r0, r12, r11
+ ldr r12, [r4, #72]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r4, #64]
+ str r0, [sp, #36] @ 4-byte Spill
+ sbcs r0, r5, r0
+ ldr r5, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ sbcs r0, r5, r3
+ ldr r5, [r4, #80]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ sbcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ sbcs r0, r6, r5
+ ldr r6, [r4, #84]
+ str r0, [sp, #24] @ 4-byte Spill
+ sbcs r0, r7, r6
+ str r6, [sp, #92] @ 4-byte Spill
+ ldr r6, [r4, #88]
+ str r0, [sp, #20] @ 4-byte Spill
+ sbcs r0, r8, r6
+ str r6, [sp, #88] @ 4-byte Spill
+ ldr r6, [r4, #92]
+ str r0, [sp, #16] @ 4-byte Spill
+ sbcs r0, r9, r6
+ add r9, r4, #96
+ str r6, [sp, #84] @ 4-byte Spill
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r9, {r6, r7, r8, r9}
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r1, [sp, #48] @ 4-byte Reload
+ ldr r2, [sp, #40] @ 4-byte Reload
+ sbcs r0, r0, r6
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ sbcs r0, r0, r7
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ sbcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ sbcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ sbc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adds r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [r4, #28]
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [r4, #32]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [r4, #36]
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #72] @ 4-byte Reload
+ str r1, [r4, #40]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r0, [r4, #44]
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [r4, #48]
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r0, [r4, #52]
+ adcs r1, r10, r1
+ ldr r0, [sp, #16] @ 4-byte Reload
+ str r1, [r4, #56]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [r4, #60]
+ adcs r1, r1, r2
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r1, [r4, #64]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r3, r0
+ adcs r1, r12, r1
+ str r0, [r4, #68]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ add r12, r4, #92
+ str r1, [r4, #72]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r0, lr, r0
+ adcs r1, r5, r1
+ str r0, [r4, #76]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r1, [r4, #80]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [r4, #84]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [r4, #88]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ adcs r1, r6, #0
+ adcs r2, r7, #0
+ adcs r3, r8, #0
+ adc r7, r9, #0
+ stm r12, {r0, r1, r2, r3, r7}
+ add sp, sp, #228
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end208:
+ .size mcl_fpDbl_mulPre14L, .Lfunc_end208-mcl_fpDbl_mulPre14L
+ .cantunwind
+ .fnend
+
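+@ mcl_fpDbl_sqrPre14L: 14-limb squaring using the same one-level split as
+@ mcl_fpDbl_mulPre14L above; mcl_fpDbl_mulPre7L is called three times with
+@ equal operand pointers (low half, high half, and the half-sum), and the
+@ pieces are recombined below.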
+ .globl mcl_fpDbl_sqrPre14L
+ .align 2
+ .type mcl_fpDbl_sqrPre14L,%function
+mcl_fpDbl_sqrPre14L: @ @mcl_fpDbl_sqrPre14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #220
+ sub sp, sp, #220
+ mov r5, r1
+ mov r4, r0
+ mov r2, r5
+ bl mcl_fpDbl_mulPre7L(PLT)
+ add r1, r5, #28
+ add r0, r4, #56
+ mov r2, r1
+ bl mcl_fpDbl_mulPre7L(PLT)
+ ldr r0, [r5, #44]
+ ldr r11, [r5, #32]
+ ldr r10, [r5, #36]
+ ldr r8, [r5, #40]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r5, #48]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r5, #52]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldm r5, {r6, r7}
+ ldr r0, [r5, #28]
+ ldr r3, [r5, #8]
+ ldr r2, [r5, #12]
+ ldr r12, [r5, #16]
+ ldr lr, [r5, #24]
+ ldr r1, [r5, #20]
+ ldr r5, [sp, #96] @ 4-byte Reload
+ adds r9, r6, r0
+ adcs r0, r7, r11
+ ldr r7, [sp, #100] @ 4-byte Reload
+ str r9, [sp, #136]
+ str r9, [sp, #108]
+ adcs r3, r3, r10
+ str r0, [sp, #140]
+ str r0, [sp, #112]
+ adcs r2, r2, r8
+ str r3, [sp, #144]
+ str r3, [sp, #116]
+ adcs r6, r12, r5
+ str r2, [sp, #148]
+ str r2, [sp, #120]
+ adcs r1, r1, r7
+ ldr r7, [sp, #104] @ 4-byte Reload
+ str r6, [sp, #152]
+ str r6, [sp, #124]
+ lsr r5, r1, #31
+ str r1, [sp, #156]
+ str r1, [sp, #128]
+ adcs r8, lr, r7
+ orr r5, r5, r8, lsl #1
+ str r8, [sp, #160]
+ str r8, [sp, #132]
+ str r5, [sp, #104] @ 4-byte Spill
+ lsr r5, r6, #31
+ orr r1, r5, r1, lsl #1
+ str r1, [sp, #100] @ 4-byte Spill
+ lsr r1, r2, #31
+ orr r1, r1, r6, lsl #1
+ str r1, [sp, #96] @ 4-byte Spill
+ lsr r1, r3, #31
+ orr r1, r1, r2, lsl #1
+ add r2, sp, #108
+ str r1, [sp, #92] @ 4-byte Spill
+ lsr r1, r0, #31
+ orr r1, r1, r3, lsl #1
+ str r1, [sp, #84] @ 4-byte Spill
+ lsr r1, r9, #31
+ orr r0, r1, r0, lsl #1
+ add r1, sp, #136
+ str r0, [sp, #76] @ 4-byte Spill
+ mov r0, #0
+ adc r6, r0, #0
+ add r0, sp, #164
+ bl mcl_fpDbl_mulPre7L(PLT)
+ add lr, sp, #204
+ add r7, sp, #192
+ ldm lr, {r5, r10, r11, lr}
+ ldm r7, {r0, r1, r7}
+ ldr r2, [sp, #100] @ 4-byte Reload
+ ldr r3, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ adds r0, r0, r9, lsl #1
+ mov r9, r1
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r12, r7, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r1, r5, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r10, r0
+ adcs r2, r11, r2
+ adcs r3, lr, r3
+ adc r8, r6, r8, lsr #31
+ cmp r6, #0
+ moveq r0, r10
+ moveq r1, r5
+ moveq r3, lr
+ moveq r2, r11
+ moveq r12, r7
+ cmp r6, #0
+ ldr lr, [r4]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ moveq r8, r6
+ str r2, [sp, #100] @ 4-byte Spill
+ mov r5, r3
+ ldr r3, [sp, #76] @ 4-byte Reload
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r1, [sp, #96] @ 4-byte Spill
+ mov r7, r8
+ add r8, sp, #164
+ moveq r3, r9
+ ldmib r4, {r9, r10, r11}
+ moveq r2, r0
+ ldm r8, {r0, r1, r8}
+ ldr r6, [sp, #176]
+ subs lr, r0, lr
+ sbcs r0, r1, r9
+ ldr r1, [sp, #180]
+ str r0, [sp, #60] @ 4-byte Spill
+ sbcs r0, r8, r10
+ ldr r10, [r4, #56]
+ str r0, [sp, #76] @ 4-byte Spill
+ sbcs r0, r6, r11
+ ldr r11, [r4, #60]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r4, #16]
+ sbcs r0, r1, r0
+ ldr r1, [sp, #184]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r4, #20]
+ sbcs r0, r1, r0
+ ldr r1, [sp, #188]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r4, #24]
+ sbcs r6, r1, r0
+ ldr r1, [r4, #28]
+ ldr r0, [r4, #32]
+ sbcs r9, r2, r1
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r2, [sp, #96] @ 4-byte Reload
+ sbcs r8, r3, r0
+ ldr r0, [r4, #36]
+ ldr r3, [r4, #68]
+ str r0, [sp, #88] @ 4-byte Spill
+ sbcs r0, r12, r0
+ ldr r12, [r4, #72]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r4, #40]
+ str r0, [sp, #84] @ 4-byte Spill
+ sbcs r0, r2, r0
+ ldr r2, [r4, #44]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ str r2, [sp, #96] @ 4-byte Spill
+ sbcs r0, r0, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r4, #48]
+ str r0, [sp, #104] @ 4-byte Spill
+ sbcs r0, r2, r0
+ ldr r2, [r4, #64]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r4, #52]
+ str r2, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #100] @ 4-byte Spill
+ sbcs r0, r5, r0
+ ldr r5, [r4, #80]
+ str r0, [sp, #44] @ 4-byte Spill
+ sbc r0, r7, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ subs r0, lr, r10
+ ldr lr, [r4, #76]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ sbcs r0, r0, r11
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ sbcs r0, r0, r2
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ sbcs r0, r0, r3
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ sbcs r0, r0, r12
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ sbcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ sbcs r0, r6, r5
+ ldr r6, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r4, #84]
+ str r0, [sp, #80] @ 4-byte Spill
+ sbcs r0, r9, r0
+ add r9, r4, #96
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r4, #88]
+ str r0, [sp, #76] @ 4-byte Spill
+ sbcs r0, r8, r0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [r4, #92]
+ str r0, [sp, #72] @ 4-byte Spill
+ sbcs r0, r6, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldm r9, {r6, r7, r8, r9}
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r2, [sp, #60] @ 4-byte Reload
+ sbcs r0, r0, r6
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r0, r7
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ sbcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ sbcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ sbc r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adds r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [r4, #28]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [r4, #32]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [r4, #36]
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [r4, #40]
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r0, [r4, #44]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [r4, #48]
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r0, [r4, #52]
+ adcs r1, r10, r1
+ ldr r0, [sp, #8] @ 4-byte Reload
+ str r1, [r4, #56]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [r4, #60]
+ adcs r1, r1, r2
+ ldr r0, [sp] @ 4-byte Reload
+ str r1, [r4, #64]
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r0, r3, r0
+ adcs r1, r12, r1
+ str r0, [r4, #68]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ add r12, r4, #92
+ str r1, [r4, #72]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r0, lr, r0
+ adcs r1, r5, r1
+ str r0, [r4, #76]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r1, [r4, #80]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [r4, #84]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [r4, #88]
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ adcs r1, r6, #0
+ adcs r2, r7, #0
+ adcs r3, r8, #0
+ adc r7, r9, #0
+ stm r12, {r0, r1, r2, r3, r7}
+ add sp, sp, #220
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end209:
+ .size mcl_fpDbl_sqrPre14L, .Lfunc_end209-mcl_fpDbl_sqrPre14L
+ .cantunwind
+ .fnend
+
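+@ mcl_fp_mont14L: 14-limb Montgomery multiplication. For each 32-bit digit
+@ of the multiplier it calls .LmulPv448x32 to add x*y[i] into the running
+@ accumulator, forms q from the accumulator's low limb and the word at
+@ [r3, #-4] (presumably -p^-1 mod 2^32), calls .LmulPv448x32 again to add
+@ q*p, and drops the now-zero low limb. Rough per-digit sketch
+@ (illustrative only, not from the source):
+@   T += x * y[i];
+@   q  = (uint32_t)T * rp;      // rp loaded from [r3, #-4]
+@   T  = (T + q * p) >> 32;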
+ .globl mcl_fp_mont14L
+ .align 2
+ .type mcl_fp_mont14L,%function
+mcl_fp_mont14L: @ @mcl_fp_mont14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #892
+ sub sp, sp, #892
+ .pad #1024
+ sub sp, sp, #1024
+ add r12, sp, #108
+ add r7, sp, #1024
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, r7, #824
+ ldr r6, [r3, #-4]
+ ldr r2, [r2]
+ str r6, [sp, #104] @ 4-byte Spill
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1852]
+ ldr r5, [sp, #1848]
+ add r8, sp, #1024
+ mov r1, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1856]
+ mul r2, r5, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1860]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #1904]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #1900]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #1896]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #1892]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #1888]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1884]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1880]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1876]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1872]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1868]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1864]
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, r8, #760
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1840]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r10, [sp, #1808]
+ ldr r11, [sp, #1804]
+ ldr r7, [sp, #1800]
+ ldr r9, [sp, #1784]
+ ldr r4, [sp, #1788]
+ ldr r6, [sp, #1792]
+ ldr r8, [sp, #1796]
+ add lr, sp, #1024
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1836]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1832]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1828]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1824]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1820]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1816]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1812]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, lr, #696
+ bl .LmulPv448x32(PLT)
+ adds r0, r9, r5
+ ldr r1, [sp, #48] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ ldr r3, [sp, #1736]
+ ldr r12, [sp, #1740]
+ ldr lr, [sp, #1744]
+ ldr r5, [sp, #1752]
+ ldr r9, [sp, #1760]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #1748]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #1720]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #1756]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adcs r1, r11, r1
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, #0
+ ldr r11, [sp, #80] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r10, r1
+ ldr r10, [sp, #1764]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #1732]
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #1728]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1776]
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1772]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1768]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #1724]
+ adcs r0, r7, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r6, r0
+ add r6, sp, #1024
+ add r0, r6, #632
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1712]
+ add r11, sp, #1664
+ ldr r8, [sp, #1684]
+ ldr r9, [sp, #1680]
+ ldr r10, [sp, #1676]
+ ldr r4, [sp, #1656]
+ ldr r7, [sp, #1660]
+ add lr, sp, #1024
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1708]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1704]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1700]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1696]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1692]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1688]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r6, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, lr, #568
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r2, [sp, #1604]
+ ldr r3, [sp, #1608]
+ ldr r12, [sp, #1612]
+ ldr lr, [sp, #1616]
+ adds r0, r0, r4
+ ldr r4, [sp, #1620]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1624]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1592]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1636]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1632]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1628]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1600]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1648]
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1644]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1640]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1596]
+ adcs r0, r7, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r6, r0
+ add r6, sp, #1024
+ add r0, r6, #504
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1584]
+ add r11, sp, #1536
+ ldr r8, [sp, #1556]
+ ldr r9, [sp, #1552]
+ ldr r10, [sp, #1548]
+ ldr r4, [sp, #1528]
+ ldr r7, [sp, #1532]
+ add lr, sp, #1024
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1580]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1576]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1572]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1568]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1564]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1560]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r6, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, lr, #440
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r2, [sp, #1476]
+ ldr r3, [sp, #1480]
+ ldr r12, [sp, #1484]
+ ldr lr, [sp, #1488]
+ adds r0, r0, r4
+ ldr r4, [sp, #1492]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1496]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1464]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1508]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1504]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1500]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1472]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1520]
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1516]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1512]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1468]
+ adcs r0, r7, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r6, r0
+ add r6, sp, #1024
+ add r0, r6, #376
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1456]
+ add r11, sp, #1408
+ ldr r8, [sp, #1428]
+ ldr r9, [sp, #1424]
+ ldr r10, [sp, #1420]
+ ldr r4, [sp, #1400]
+ ldr r7, [sp, #1404]
+ add lr, sp, #1024
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1452]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1448]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1444]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1440]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1436]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1432]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r6, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, lr, #312
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r2, [sp, #1348]
+ ldr r3, [sp, #1352]
+ ldr r12, [sp, #1356]
+ ldr lr, [sp, #1360]
+ adds r0, r0, r4
+ ldr r4, [sp, #1364]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1368]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1336]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1380]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1376]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1372]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1344]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1392]
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1388]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1384]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ adcs r0, r7, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r6, r0
+ add r6, sp, #1024
+ add r0, r6, #248
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1328]
+ add r11, sp, #1280
+ ldr r8, [sp, #1300]
+ ldr r9, [sp, #1296]
+ ldr r10, [sp, #1292]
+ ldr r4, [sp, #1272]
+ ldr r7, [sp, #1276]
+ add lr, sp, #1024
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1324]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1320]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1316]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1312]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1308]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1304]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r6, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, lr, #184
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r2, [sp, #1220]
+ ldr r3, [sp, #1224]
+ ldr r12, [sp, #1228]
+ ldr lr, [sp, #1232]
+ adds r0, r0, r4
+ ldr r4, [sp, #1236]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1240]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1208]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1252]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1248]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1244]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1216]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1260]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1256]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1212]
+ adcs r0, r7, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r6, r0
+ add r6, sp, #1024
+ add r0, r6, #120
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1200]
+ add r11, sp, #1152
+ ldr r8, [sp, #1172]
+ ldr r9, [sp, #1168]
+ ldr r10, [sp, #1164]
+ ldr r4, [sp, #1144]
+ ldr r7, [sp, #1148]
+ add lr, sp, #1024
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1188]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1184]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1180]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1176]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r6, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, lr, #56
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ ldr r2, [sp, #1092]
+ ldr r3, [sp, #1096]
+ ldr r12, [sp, #1100]
+ ldr lr, [sp, #1104]
+ adds r0, r0, r4
+ ldr r4, [sp, #1108]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1112]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1080]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1124]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1120]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1116]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1088]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1132]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1128]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1084]
+ adcs r0, r7, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #1016
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1072]
+ add r11, sp, #1024
+ ldr r8, [sp, #1044]
+ ldr r9, [sp, #1040]
+ ldr r10, [sp, #1036]
+ ldr r4, [sp, #1016]
+ ldr r7, [sp, #1020]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1068]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1064]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1060]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r6, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #952
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #956
+ adds r0, r0, r4
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #980
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1008]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1004]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1000]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #952]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #888
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #944]
+ add r11, sp, #896
+ ldr r8, [sp, #916]
+ ldr r9, [sp, #912]
+ ldr r10, [sp, #908]
+ ldr r4, [sp, #888]
+ ldr r7, [sp, #892]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #940]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #936]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #932]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #920]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r6, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #824
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #828
+ adds r0, r0, r4
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #852
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #880]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #876]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #872]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #824]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #760
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #816]
+ add r11, sp, #768
+ ldr r8, [sp, #788]
+ ldr r9, [sp, #784]
+ ldr r10, [sp, #780]
+ ldr r4, [sp, #760]
+ ldr r7, [sp, #764]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #812]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r5, r6, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #696
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #700
+ adds r0, r0, r4
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #724
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #752]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #748]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #744]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #696]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ mul r2, r6, r5
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #632
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #688]
+ add r11, sp, #632
+ ldr r6, [sp, #656]
+ ldr r4, [sp, #652]
+ ldr r7, [sp, #648]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #684]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #680]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #676]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #672]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #664]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #660]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #568
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #4] @ 4-byte Reload
+ add lr, sp, #584
+ adds r0, r0, r8
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r2, r0, r9
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #608
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #568
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r11, {r4, r6, r7, r11}
+ adds r0, r2, r4
+ mul r1, r0, r5
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #624]
+ str r1, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r5, [sp, #96] @ 4-byte Reload
+ adcs r6, r5, r6
+ ldr r5, [sp, #92] @ 4-byte Reload
+ str r6, [sp, #96] @ 4-byte Spill
+ adcs r6, r5, r7
+ ldr r5, [sp, #88] @ 4-byte Reload
+ str r6, [sp, #92] @ 4-byte Spill
+ adcs r6, r5, r11
+ ldr r5, [sp, #84] @ 4-byte Reload
+ str r6, [sp, #88] @ 4-byte Spill
+ adcs r0, r5, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #504
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #560]
+ add r10, sp, #504
+ ldr r11, [sp, #532]
+ ldr r4, [sp, #528]
+ ldr r6, [sp, #524]
+ ldr r7, [sp, #520]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #552]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #548]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #544]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #540]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #536]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #440
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ add lr, sp, #456
+ adds r0, r0, r5
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r8
+ adcs r1, r1, r9
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #480
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r7
+ add r7, sp, #440
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #24] @ 4-byte Spill
+ ldm r7, {r4, r6, r7}
+ ldr r5, [sp, #452]
+ adds r1, r0, r4
+ ldr r0, [sp, #104] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ mul r2, r1, r0
+ ldr r0, [sp, #496]
+ str r2, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #96] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #48] @ 4-byte Spill
+ ldr r6, [sp, #92] @ 4-byte Reload
+ adcs r6, r6, r7
+ str r6, [sp, #44] @ 4-byte Spill
+ ldr r6, [sp, #88] @ 4-byte Reload
+ adcs r5, r6, r5
+ str r5, [sp, #40] @ 4-byte Spill
+ ldr r5, [sp, #84] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #376
+ bl .LmulPv448x32(PLT)
+ ldr r1, [sp, #432]
+ ldr r8, [sp, #404]
+ ldr r9, [sp, #400]
+ ldr r10, [sp, #396]
+ ldr r11, [sp, #392]
+ ldr r6, [sp, #376]
+ ldr r5, [sp, #380]
+ ldr r7, [sp, #384]
+ ldr r4, [sp, #388]
+ add r0, sp, #312
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #428]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #424]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #420]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #416]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #412]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #408]
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #316
+ adds r0, r0, r6
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #340
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #364]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r4, r6, r7, r8, r9, r10}
+ ldr r5, [sp, #312]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ adds r11, r11, r5
+ ldr r5, [sp, #48] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #248
+ bl .LmulPv448x32(PLT)
+ ldr r1, [sp, #304]
+ ldr r10, [sp, #272]
+ ldr r11, [sp, #268]
+ ldr r8, [sp, #264]
+ ldr r6, [sp, #248]
+ ldr r7, [sp, #252]
+ ldr r4, [sp, #256]
+ ldr r9, [sp, #260]
+ add r0, sp, #184
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #300]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #296]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #292]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #288]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #284]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #280]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #276]
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ ldr r2, [r1, #52]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #200
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r3, r0, r7
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #184
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #224
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldm r8, {r4, r7, r8}
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r5, [sp, #196]
+ adds r4, r3, r4
+ mul r1, r4, r0
+ ldr r0, [sp, #240]
+ str r1, [sp, #48] @ 4-byte Spill
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #236]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r6, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #112] @ 4-byte Reload
+ adcs r11, r11, r7
+ ldr r7, [sp, #108] @ 4-byte Reload
+ adcs r8, r7, r8
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adcs r5, r7, r5
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r9, r0, r9
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ mov r0, #0
+ adc r7, r0, #0
+ add r0, sp, #120
+ bl .LmulPv448x32(PLT)
+ add r3, sp, #120
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r4, r0
+ adcs r4, r11, r1
+ ldr r0, [sp, #136]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r6, r8, r2
+ str r4, [sp, #36] @ 4-byte Spill
+ adcs r12, r5, r3
+ str r6, [sp, #48] @ 4-byte Spill
+ str r12, [sp, #56] @ 4-byte Spill
+ adcs r8, r1, r0
+ ldr r0, [sp, #140]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r8, [sp, #64] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #144]
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #148]
+ adcs r0, r1, r0
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #152]
+ adcs r0, r1, r0
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #156]
+ adcs r0, r1, r0
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #160]
+ adcs r0, r1, r0
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #164]
+ adcs r0, r9, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #168]
+ adcs r0, r1, r0
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #172]
+ adcs r0, r1, r0
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #176]
+ adcs r0, r1, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ adc r0, r7, #0
+ mov r7, r10
+ str r0, [sp, #60] @ 4-byte Spill
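+@ The block below appears to be the final conditional reduction: the modulus pointer is
+@ reloaded into r7, result - p is formed limb by limb with subs/sbcs, and the borrow
+@ (ands r1, r0, #1) drives the movne instructions so that either the subtracted or the
+@ original limbs are stored to the 14-word result at [r0] .. [r0, #52].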
+ ldmib r7, {r1, r2, r3, r10, r11, lr}
+ ldr r5, [r7]
+ ldr r0, [r7, #28]
+ ldr r9, [r7, #44]
+ subs r5, r4, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r7, #40]
+ sbcs r6, r6, r1
+ ldr r1, [r7, #32]
+ ldr r4, [sp, #68] @ 4-byte Reload
+ sbcs r2, r12, r2
+ sbcs r12, r8, r3
+ ldr r3, [r7, #48]
+ ldr r8, [r7, #36]
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [r7, #52]
+ ldr r7, [sp, #84] @ 4-byte Reload
+ str r3, [sp, #116] @ 4-byte Spill
+ ldr r3, [sp, #80] @ 4-byte Reload
+ sbcs r10, r3, r10
+ ldr r3, [sp, #76] @ 4-byte Reload
+ sbcs r3, r3, r11
+ sbcs lr, r7, lr
+ ldr r7, [sp, #88] @ 4-byte Reload
+ sbcs r4, r7, r4
+ ldr r7, [sp, #92] @ 4-byte Reload
+ sbcs r7, r7, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ sbcs r8, r1, r8
+ ldr r1, [sp, #100] @ 4-byte Reload
+ sbcs r11, r1, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #52] @ 4-byte Reload
+ sbcs r9, r0, r9
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ sbcs r0, r0, r1
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ sbc r0, r0, #0
+ ands r1, r0, #1
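+@ r1 is non-zero exactly when the accumulated value T (including its carry word)
+@ is still below the modulus p; the movne's below then keep T, otherwise the
+@ reduced value T - p computed above is written to the result.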
+ ldr r0, [sp, #36] @ 4-byte Reload
+ movne r5, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r5, [r0]
+ ldr r5, [sp, #48] @ 4-byte Reload
+ movne r6, r5
+ ldr r5, [sp, #56] @ 4-byte Reload
+ str r6, [r0, #4]
+ movne r2, r5
+ cmp r1, #0
+ str r2, [r0, #8]
+ ldr r2, [sp, #64] @ 4-byte Reload
+ movne r12, r2
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r12, [r0, #12]
+ movne r10, r2
+ ldr r2, [sp, #76] @ 4-byte Reload
+ str r10, [r0, #16]
+ movne r3, r2
+ ldr r2, [sp, #84] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #20]
+ movne lr, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str lr, [r0, #24]
+ movne r4, r2
+ ldr r2, [sp, #92] @ 4-byte Reload
+ str r4, [r0, #28]
+ movne r7, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ cmp r1, #0
+ str r7, [r0, #32]
+ movne r8, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ str r8, [r0, #36]
+ movne r11, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r11, [r0, #40]
+ movne r9, r2
+ cmp r1, #0
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r9, [r0, #44]
+ movne r2, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r2, [r0, #48]
+ ldr r2, [sp, #116] @ 4-byte Reload
+ movne r2, r1
+ str r2, [r0, #52]
+ add sp, sp, #892
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end210:
+ .size mcl_fp_mont14L, .Lfunc_end210-mcl_fp_mont14L
+ .cantunwind
+ .fnend
+
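+@ mcl_fp_montNF14L: 448-bit (14 x 32-bit limb) Montgomery multiplication, NF variant.
+@ On entry r0 = result pointer, r1 = multiplicand, r2 = multiplier, r3 = modulus p;
+@ the word at [r3, #-4] (presumably -p^-1 mod 2^32) gives the per-iteration Montgomery
+@ factor. Each of the 14 outer iterations calls the local .LmulPv448x32 helper twice,
+@ once with the next multiplier word and once with the modulus and the derived quotient
+@ word, folding both 15-word products into an accumulator kept in stack spill slots.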
+ .globl mcl_fp_montNF14L
+ .align 2
+ .type mcl_fp_montNF14L,%function
+mcl_fp_montNF14L: @ @mcl_fp_montNF14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #892
+ sub sp, sp, #892
+ .pad #1024
+ sub sp, sp, #1024
+ add r12, sp, #108
+ add r6, sp, #1024
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #76] @ 4-byte Spill
+ add r0, r6, #824
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #104] @ 4-byte Spill
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1852]
+ ldr r8, [sp, #1848]
+ add r10, sp, #1024
+ mov r1, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1856]
+ mul r2, r8, r5
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #1860]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #1904]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #1900]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #1896]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #1892]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #1888]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1884]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1880]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1876]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1872]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1868]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1864]
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, r10, #760
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1840]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r11, [sp, #1808]
+ ldr r6, [sp, #1804]
+ ldr r7, [sp, #1800]
+ ldr r5, [sp, #1784]
+ ldr r9, [sp, #1788]
+ ldr r10, [sp, #1792]
+ ldr r4, [sp, #1796]
+ add lr, sp, #1024
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1836]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1832]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1828]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1824]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1820]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1816]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1812]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, lr, #696
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r8
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r5, [sp, #1720]
+ ldr r2, [sp, #1732]
+ ldr r3, [sp, #1736]
+ ldr r12, [sp, #1740]
+ ldr lr, [sp, #1744]
+ ldr r8, [sp, #1760]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #1764]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #1768]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #1748]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #1756]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #1752]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r11, r0
+ ldr r11, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adc r0, r1, r0
+ adds r11, r11, r5
+ ldr r5, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #1728]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1776]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1772]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1724]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #1024
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, r8, #632
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1712]
+ add r11, sp, #1664
+ ldr r9, [sp, #1680]
+ ldr r10, [sp, #1676]
+ ldr r6, [sp, #1656]
+ ldr r7, [sp, #1660]
+ add lr, sp, #1024
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1708]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1704]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1700]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1696]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1692]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1688]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1684]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, lr, #568
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r5, [sp, #1592]
+ ldr r2, [sp, #1604]
+ ldr r3, [sp, #1608]
+ ldr r12, [sp, #1612]
+ ldr lr, [sp, #1616]
+ ldr r6, [sp, #1624]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1628]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1620]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1632]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1640]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1636]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r11, r11, r5
+ ldr r5, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #1600]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1648]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1644]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1596]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #1024
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, r8, #504
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1584]
+ add r11, sp, #1536
+ ldr r9, [sp, #1552]
+ ldr r10, [sp, #1548]
+ ldr r6, [sp, #1528]
+ ldr r7, [sp, #1532]
+ add lr, sp, #1024
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1580]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1576]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1572]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1568]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1564]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1560]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1556]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, lr, #440
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r5, [sp, #1464]
+ ldr r2, [sp, #1476]
+ ldr r3, [sp, #1480]
+ ldr r12, [sp, #1484]
+ ldr lr, [sp, #1488]
+ ldr r6, [sp, #1496]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1500]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1492]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1504]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1512]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1508]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r11, r11, r5
+ ldr r5, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #1472]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1520]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1516]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1468]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #1024
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, r8, #376
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1456]
+ add r11, sp, #1408
+ ldr r9, [sp, #1424]
+ ldr r10, [sp, #1420]
+ ldr r6, [sp, #1400]
+ ldr r7, [sp, #1404]
+ add lr, sp, #1024
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1452]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1448]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1444]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1440]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1436]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1432]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1428]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, lr, #312
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r5, [sp, #1336]
+ ldr r2, [sp, #1348]
+ ldr r3, [sp, #1352]
+ ldr r12, [sp, #1356]
+ ldr lr, [sp, #1360]
+ ldr r6, [sp, #1368]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1372]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1364]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1376]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1384]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1380]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r11, r11, r5
+ ldr r5, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #1344]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1392]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1388]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #1024
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, r8, #248
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1328]
+ add r11, sp, #1280
+ ldr r9, [sp, #1296]
+ ldr r10, [sp, #1292]
+ ldr r6, [sp, #1272]
+ ldr r7, [sp, #1276]
+ add lr, sp, #1024
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1324]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1320]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1316]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1312]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1308]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1304]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1300]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, lr, #184
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r5, [sp, #1208]
+ ldr r2, [sp, #1220]
+ ldr r3, [sp, #1224]
+ ldr r12, [sp, #1228]
+ ldr lr, [sp, #1232]
+ ldr r6, [sp, #1240]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1244]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1236]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1248]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1256]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1252]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r11, r11, r5
+ ldr r5, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #1216]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1260]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1212]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #1024
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, r8, #120
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1200]
+ add r11, sp, #1152
+ ldr r9, [sp, #1168]
+ ldr r10, [sp, #1164]
+ ldr r6, [sp, #1144]
+ ldr r7, [sp, #1148]
+ add lr, sp, #1024
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1188]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1184]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1180]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1176]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1172]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, lr, #56
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r5, [sp, #1080]
+ ldr r2, [sp, #1092]
+ ldr r3, [sp, #1096]
+ ldr r12, [sp, #1100]
+ ldr lr, [sp, #1104]
+ ldr r6, [sp, #1112]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1116]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1108]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1120]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1128]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1124]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r11, r11, r5
+ ldr r5, [sp, #96] @ 4-byte Reload
+ ldr r1, [sp, #1088]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1132]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1084]
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #1016
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1072]
+ add r11, sp, #1024
+ ldr r9, [sp, #1040]
+ ldr r10, [sp, #1036]
+ ldr r6, [sp, #1016]
+ ldr r7, [sp, #1020]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1068]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1064]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1060]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #952
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #956
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #980
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1008]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1004]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r6, r7, r8, r9, r10}
+ ldr r5, [sp, #952]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ adds r11, r11, r5
+ ldr r5, [sp, #96] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #888
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #944]
+ add r11, sp, #896
+ ldr r9, [sp, #912]
+ ldr r10, [sp, #908]
+ ldr r6, [sp, #888]
+ ldr r7, [sp, #892]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #940]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #936]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #932]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #920]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #916]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #824
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #828
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #852
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #880]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #876]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r6, r7, r8, r9, r10}
+ ldr r5, [sp, #824]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ adds r11, r11, r5
+ ldr r5, [sp, #96] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #760
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #816]
+ add r11, sp, #768
+ ldr r9, [sp, #784]
+ ldr r10, [sp, #780]
+ ldr r6, [sp, #760]
+ ldr r7, [sp, #764]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #812]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #696
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #700
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #724
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #752]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #748]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #744]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r6, r8, r9, r10}
+ ldr r5, [sp, #696]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adds r5, r11, r5
+ adcs r0, r7, r0
+ str r5, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mul r2, r5, r9
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #632
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #688]
+ add r11, sp, #640
+ ldr r5, [sp, #656]
+ ldr r10, [sp, #652]
+ ldr r6, [sp, #632]
+ ldr r7, [sp, #636]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #684]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #680]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #676]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #672]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #664]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #660]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #568
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #96] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ add lr, sp, #584
+ adds r0, r0, r6
+ ldr r6, [sp, #580]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #572]
+ adcs r1, r1, r4
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #608
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp, #576]
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adc r1, r1, r2
+ ldr r2, [sp, #568]
+ str r1, [sp, #44] @ 4-byte Spill
+ adds r0, r0, r2
+ mul r1, r0, r9
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #624]
+ str r1, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #96] @ 4-byte Reload
+ adcs r7, r11, r7
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [sp, #92] @ 4-byte Reload
+ adcs r5, r7, r5
+ str r5, [sp, #92] @ 4-byte Spill
+ ldr r5, [sp, #88] @ 4-byte Reload
+ adcs r5, r5, r6
+ str r5, [sp, #88] @ 4-byte Spill
+ ldr r5, [sp, #84] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #504
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #560]
+ add r10, sp, #508
+ ldr r7, [sp, #532]
+ ldr r8, [sp, #528]
+ ldr r9, [sp, #524]
+ ldr r11, [sp, #504]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #552]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #548]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #544]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #540]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #536]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r10}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #440
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #456
+ adds r0, r0, r11
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ adcs r1, r1, r5
+ ldr r5, [sp, #448]
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #452]
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #480
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r7
+ ldr r7, [sp, #444]
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adc r1, r1, r2
+ ldr r2, [sp, #440]
+ str r1, [sp, #36] @ 4-byte Spill
+ adds r1, r0, r2
+ ldr r0, [sp, #104] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ mul r2, r1, r0
+ ldr r0, [sp, #496]
+ str r2, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #96] @ 4-byte Reload
+ adcs r7, r11, r7
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [sp, #92] @ 4-byte Reload
+ adcs r5, r7, r5
+ str r5, [sp, #48] @ 4-byte Spill
+ ldr r5, [sp, #88] @ 4-byte Reload
+ adcs r5, r5, r6
+ str r5, [sp, #44] @ 4-byte Spill
+ ldr r5, [sp, #84] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #376
+ bl .LmulPv448x32(PLT)
+ ldr r1, [sp, #432]
+ add r10, sp, #380
+ ldr r7, [sp, #404]
+ ldr r8, [sp, #400]
+ ldr r9, [sp, #396]
+ ldr r11, [sp, #376]
+ add r0, sp, #312
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #428]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #424]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #420]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #416]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #412]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #408]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r10}
+ ldr r1, [sp, #112] @ 4-byte Reload
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #316
+ adds r0, r0, r11
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #340
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #364]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r6, r7, r8, r9, r10}
+ ldr r5, [sp, #312]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ adds r11, r11, r5
+ ldr r5, [sp, #52] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r11
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #248
+ bl .LmulPv448x32(PLT)
+ ldr r1, [sp, #304]
+ ldr r10, [sp, #272]
+ ldr r11, [sp, #268]
+ ldr r8, [sp, #264]
+ ldr r6, [sp, #248]
+ ldr r7, [sp, #252]
+ ldr r4, [sp, #256]
+ ldr r9, [sp, #260]
+ add r0, sp, #184
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #300]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #296]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #292]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #288]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #284]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #280]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #276]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ ldr r2, [r1, #52]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ bl .LmulPv448x32(PLT)
+ adds r0, r5, r6
+ ldr r1, [sp, #52] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #200
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r7
+ adcs r1, r1, r4
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r1, r1, r8
+ add r8, sp, #184
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #224
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adc r1, r1, r2
+ str r1, [sp, #60] @ 4-byte Spill
+ ldm r8, {r2, r7, r8}
+ ldr r6, [sp, #196]
+ adds r4, r0, r2
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mul r1, r4, r0
+ ldr r0, [sp, #240]
+ str r1, [sp, #52] @ 4-byte Spill
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #236]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r5, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #112] @ 4-byte Reload
+ adcs r11, r11, r7
+ ldr r7, [sp, #108] @ 4-byte Reload
+ adcs r8, r7, r8
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adcs r6, r7, r6
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r7, r0, r5
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r5, [sp, #116] @ 4-byte Reload
+ adcs r9, r0, r9
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ add r0, sp, #120
+ bl .LmulPv448x32(PLT)
+ add r3, sp, #120
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r4, r0
+ mov r4, r5
+ adcs r11, r11, r1
+ ldr r0, [sp, #136]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r2, r8, r2
+ str r11, [sp, #44] @ 4-byte Spill
+ adcs lr, r6, r3
+ str r2, [sp, #52] @ 4-byte Spill
+ str lr, [sp, #60] @ 4-byte Spill
+ adcs r8, r1, r0
+ ldr r0, [sp, #140]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r8, [sp, #64] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #144]
+ adcs r0, r1, r0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #148]
+ adcs r0, r1, r0
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #152]
+ adcs r0, r1, r0
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #156]
+ adcs r10, r1, r0
+ ldr r0, [sp, #160]
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r10, [sp, #68] @ 4-byte Spill
+ adcs r0, r7, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #164]
+ adcs r0, r9, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #168]
+ adcs r0, r1, r0
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #172]
+ adcs r0, r1, r0
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #176]
+ adc r0, r1, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldmib r4, {r0, r1, r7, r9, r12}
+ ldr r6, [r4]
+ ldr r3, [r4, #24]
+ ldr r5, [r4, #28]
+ subs r6, r11, r6
+ str r3, [sp, #72] @ 4-byte Spill
+ add r11, r4, #32
+ sbcs r3, r2, r0
+ sbcs r2, lr, r1
+ ldm r11, {r0, r1, r11}
+ sbcs lr, r8, r7
+ ldr r7, [r4, #44]
+ ldr r8, [r4, #52]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r4, #48]
+ ldr r4, [sp, #80] @ 4-byte Reload
+ sbcs r9, r4, r9
+ ldr r4, [sp, #84] @ 4-byte Reload
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [sp, #72] @ 4-byte Reload
+ sbcs r12, r4, r12
+ ldr r4, [sp, #88] @ 4-byte Reload
+ sbcs r4, r4, r7
+ ldr r7, [sp, #92] @ 4-byte Reload
+ sbcs r5, r7, r5
+ sbcs r7, r10, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ sbcs r10, r0, r1
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r11, r0, r11
+ ldr r0, [sp, #104] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs r0, r0, r1
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ sbc r8, r0, r8
+ ldr r0, [sp, #44] @ 4-byte Reload
+ asr r1, r8, #31
+ cmp r1, #0
+ movlt r6, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r6, [r0]
+ ldr r6, [sp, #52] @ 4-byte Reload
+ movlt r3, r6
+ str r3, [r0, #4]
+ ldr r3, [sp, #60] @ 4-byte Reload
+ movlt r2, r3
+ cmp r1, #0
+ ldr r3, [sp, #72] @ 4-byte Reload
+ str r2, [r0, #8]
+ ldr r2, [sp, #64] @ 4-byte Reload
+ movlt lr, r2
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str lr, [r0, #12]
+ movlt r9, r2
+ ldr r2, [sp, #84] @ 4-byte Reload
+ str r9, [r0, #16]
+ movlt r12, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ cmp r1, #0
+ str r12, [r0, #20]
+ movlt r4, r2
+ ldr r2, [sp, #92] @ 4-byte Reload
+ str r4, [r0, #24]
+ movlt r5, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r5, [r0, #28]
+ movlt r7, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ cmp r1, #0
+ str r7, [r0, #32]
+ movlt r10, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ str r10, [r0, #36]
+ movlt r11, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r11, [r0, #40]
+ movlt r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r3, [r0, #44]
+ movlt r2, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r2, [r0, #48]
+ movlt r8, r1
+ str r8, [r0, #52]
+ add sp, sp, #892
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end211:
+ .size mcl_fp_montNF14L, .Lfunc_end211-mcl_fp_montNF14L
+ .cantunwind
+ .fnend
+
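+@ NOTE (hand annotation, not compiler output; behaviour inferred from the code
+@ below): mcl_fp_montRed14L appears to perform Montgomery reduction of a
+@ 28-word (double-width) input against a 14 x 32-bit modulus, issuing one
+@ .LmulPv448x32 call per limb and folding each partial product into the
+@ running sum with adcs chains.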
+ .globl mcl_fp_montRed14L
+ .align 2
+ .type mcl_fp_montRed14L,%function
+mcl_fp_montRed14L: @ @mcl_fp_montRed14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #68
+ sub sp, sp, #68
+ .pad #1024
+ sub sp, sp, #1024
+ mov r3, r2
+ str r0, [sp, #180] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r6, [r1]
+ ldr r0, [r3]
+ str r3, [sp, #184] @ 4-byte Spill
+ str r2, [sp, #88] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #176] @ 4-byte Spill
+ ldr r0, [r3, #4]
+ str r2, [sp, #84] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #172] @ 4-byte Spill
+ ldr r0, [r3, #8]
+ str r2, [sp, #80] @ 4-byte Spill
+ str r0, [sp, #168] @ 4-byte Spill
+ ldr r0, [r3, #12]
+ str r0, [sp, #152] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ str r0, [sp, #156] @ 4-byte Spill
+ ldr r0, [r3, #20]
+ str r0, [sp, #160] @ 4-byte Spill
+ ldr r0, [r3, #24]
+ str r0, [sp, #164] @ 4-byte Spill
+ ldr r0, [r3, #-4]
+ str r0, [sp, #188] @ 4-byte Spill
+ mul r2, r6, r0
+ ldr r0, [r3, #28]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #144] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #148] @ 4-byte Spill
+ ldr r0, [r1, #96]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [r1, #100]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [r1, #104]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [r1, #108]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r1, #72]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #80]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r1, #84]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r1, #88]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r1, #92]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [r1, #76]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #20]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ mov r1, r3
+ str r0, [sp, #12] @ 4-byte Spill
+ add r0, sp, #1024
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1080]
+ ldr r8, [sp, #1024]
+ ldr r1, [sp, #1032]
+ ldr r2, [sp, #1036]
+ ldr r3, [sp, #1040]
+ ldr r12, [sp, #1044]
+ ldr lr, [sp, #1048]
+ ldr r4, [sp, #1052]
+ ldr r5, [sp, #1056]
+ ldr r7, [sp, #1060]
+ ldr r9, [sp, #1064]
+ ldr r10, [sp, #1068]
+ ldr r11, [sp, #1072]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1076]
+ adds r6, r6, r8
+ ldr r6, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ adcs r8, r6, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #184] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #188] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, sp, #960
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #1016]
+ add lr, sp, #996
+ add r10, sp, #964
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1012]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r4, r5, r12, lr}
+ ldr r6, [sp, #960]
+ ldr r7, [sp, #992]
+ ldr r11, [sp, #988]
+ ldr r3, [sp, #984]
+ ldm r10, {r0, r1, r2, r9, r10}
+ adds r6, r8, r6
+ ldr r6, [sp, #88] @ 4-byte Reload
+ adcs r8, r6, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r6, [sp, #188] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r8, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r11
+ mov r11, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #184] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #896
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #952]
+ add r10, sp, #924
+ add lr, sp, #900
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #948]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #944]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #940]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldr r4, [sp, #896]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r4, r11, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r4, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #832
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #888]
+ add lr, sp, #872
+ add r11, sp, #832
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #884]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm lr, {r5, r12, lr}
+ ldr r6, [sp, #868]
+ ldr r7, [sp, #864]
+ ldm r11, {r0, r1, r2, r3, r8, r9, r10, r11}
+ adds r0, r4, r0
+ ldr r4, [sp, #188] @ 4-byte Reload
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r11
+ mov r11, r1
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #184] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r4
+ mov r1, r5
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #768
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #824]
+ add r10, sp, #796
+ add lr, sp, #784
+ add r9, sp, #768
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #820]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #816]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #812]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r10}
+ ldm lr, {r3, r12, lr}
+ ldm r9, {r0, r1, r2, r9}
+ adds r0, r11, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ mov r10, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r4
+ mov r1, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #704
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #760]
+ add lr, sp, #744
+ add r9, sp, #708
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #756]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm lr, {r5, r12, lr}
+ ldr r4, [sp, #704]
+ ldr r6, [sp, #740]
+ ldr r7, [sp, #736]
+ ldr r11, [sp, #732]
+ ldr r3, [sp, #728]
+ ldm r9, {r0, r1, r2, r8, r9}
+ adds r4, r10, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r11
+ mov r11, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #188] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ mul r2, r4, r5
+ ldr r4, [sp, #184] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #640
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #696]
+ add r10, sp, #664
+ add lr, sp, #640
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #688]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #684]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ mov r10, r1
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r5
+ mov r1, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #576
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #632]
+ add lr, sp, #616
+ add r9, sp, #580
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #628]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm lr, {r5, r12, lr}
+ ldr r4, [sp, #576]
+ ldr r6, [sp, #612]
+ ldr r7, [sp, #608]
+ ldr r11, [sp, #604]
+ ldr r3, [sp, #600]
+ ldm r9, {r0, r1, r2, r8, r9}
+ adds r4, r10, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r10, r4, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r4, [sp, #188] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #184] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r9
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #512
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #568]
+ add r11, sp, #536
+ add lr, sp, #512
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #564]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #560]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r5, r6, r7, r8, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r10, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ mov r5, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, sp, #448
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #504]
+ add lr, sp, #484
+ add r9, sp, #452
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #500]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #496]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm lr, {r6, r12, lr}
+ ldr r4, [sp, #448]
+ ldr r7, [sp, #480]
+ ldr r11, [sp, #476]
+ ldr r3, [sp, #472]
+ ldm r9, {r0, r1, r2, r8, r9}
+ adds r4, r10, r4
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r10, r4, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r4, [sp, #188] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #384
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #440]
+ add r11, sp, #408
+ add lr, sp, #384
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #436]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #432]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r11, {r5, r6, r7, r8, r9, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r10, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #184] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r7
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #320
+ bl .LmulPv448x32(PLT)
+ ldr r0, [sp, #376]
+ add r9, sp, #348
+ ldr r11, [sp, #364]
+ ldr r8, [sp, #360]
+ add lr, sp, #328
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #372]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r9, {r4, r6, r9}
+ ldr r3, [sp, #320]
+ ldr r5, [sp, #324]
+ ldm lr, {r0, r1, r2, r12, lr}
+ adds r3, r10, r3
+ ldr r3, [sp, #88] @ 4-byte Reload
+ adcs r5, r3, r5
+ ldr r3, [sp, #84] @ 4-byte Reload
+ adcs r10, r3, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #188] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ mul r2, r5, r6
+ adcs r0, r0, r9
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ mov r11, r7
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r11
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, sp, #256
+ bl .LmulPv448x32(PLT)
+ add r7, sp, #256
+ add r12, sp, #272
+ ldm r7, {r0, r1, r3, r7}
+ ldr r9, [sp, #312]
+ ldr r8, [sp, #308]
+ ldr lr, [sp, #304]
+ adds r0, r5, r0
+ ldr r5, [sp, #300]
+ adcs r10, r10, r1
+ mul r0, r10, r6
+ ldr r6, [sp, #296]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #188] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #292]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r4, [sp, #120] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r11
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r4, r0, r2
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r8, r0, r9
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ add r0, sp, #192
+ bl .LmulPv448x32(PLT)
+ add r3, sp, #192
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r10, r0
+ ldr r0, [sp, #188] @ 4-byte Reload
+ adcs lr, r0, r1
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str lr, [sp, #72] @ 4-byte Spill
+ adcs r2, r0, r2
+ ldr r0, [sp, #44] @ 4-byte Reload
+ str r2, [sp, #76] @ 4-byte Spill
+ adcs r3, r0, r3
+ ldr r0, [sp, #208]
+ str r3, [sp, #80] @ 4-byte Spill
+ adcs r7, r1, r0
+ ldr r0, [sp, #212]
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r7, [sp, #84] @ 4-byte Spill
+ adcs r4, r4, r0
+ ldr r0, [sp, #216]
+ str r4, [sp, #88] @ 4-byte Spill
+ adcs r5, r1, r0
+ ldr r0, [sp, #220]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r5, [sp, #92] @ 4-byte Spill
+ adcs r6, r1, r0
+ ldr r0, [sp, #224]
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r6, [sp, #96] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #184] @ 4-byte Spill
+ ldr r0, [sp, #228]
+ adcs r11, r1, r0
+ ldr r0, [sp, #232]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r11, [sp, #100] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #188] @ 4-byte Spill
+ ldr r0, [sp, #236]
+ adcs r10, r1, r0
+ ldr r0, [sp, #240]
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r10, [sp, #108] @ 4-byte Spill
+ adcs r9, r1, r0
+ ldr r0, [sp, #244]
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r9, [sp, #116] @ 4-byte Spill
+ adcs r8, r8, r0
+ ldr r0, [sp, #248]
+ str r8, [sp, #120] @ 4-byte Spill
+ adcs r12, r1, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #172] @ 4-byte Reload
+ str r12, [sp, #112] @ 4-byte Spill
+ adc r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #176] @ 4-byte Reload
+ subs r0, lr, r0
+ sbcs r1, r2, r1
+ ldr r2, [sp, #168] @ 4-byte Reload
+ sbcs r2, r3, r2
+ ldr r3, [sp, #152] @ 4-byte Reload
+ sbcs r3, r7, r3
+ ldr r7, [sp, #156] @ 4-byte Reload
+ sbcs lr, r4, r7
+ ldr r4, [sp, #160] @ 4-byte Reload
+ ldr r7, [sp, #184] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #164] @ 4-byte Reload
+ sbcs r5, r6, r5
+ ldr r6, [sp, #124] @ 4-byte Reload
+ sbcs r6, r7, r6
+ ldr r7, [sp, #128] @ 4-byte Reload
+ sbcs r7, r11, r7
+ ldr r11, [sp, #188] @ 4-byte Reload
+ str r7, [sp, #172] @ 4-byte Spill
+ ldr r7, [sp, #132] @ 4-byte Reload
+ sbcs r11, r11, r7
+ ldr r7, [sp, #136] @ 4-byte Reload
+ sbcs r7, r10, r7
+ str r7, [sp, #176] @ 4-byte Spill
+ ldr r7, [sp, #140] @ 4-byte Reload
+ sbcs r9, r9, r7
+ ldr r7, [sp, #144] @ 4-byte Reload
+ sbcs r10, r8, r7
+ ldr r7, [sp, #148] @ 4-byte Reload
+ sbcs r8, r12, r7
+ ldr r7, [sp, #104] @ 4-byte Reload
+ sbc r7, r7, #0
+ ands r12, r7, #1
+ ldr r7, [sp, #72] @ 4-byte Reload
+ movne r0, r7
+ ldr r7, [sp, #180] @ 4-byte Reload
+ str r0, [r7]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r1, [r7, #4]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ movne r2, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ cmp r12, #0
+ str r2, [r7, #8]
+ movne r3, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r3, [r7, #12]
+ movne lr, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str lr, [r7, #16]
+ movne r4, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ cmp r12, #0
+ str r4, [r7, #20]
+ movne r5, r0
+ ldr r0, [sp, #184] @ 4-byte Reload
+ str r5, [r7, #24]
+ movne r6, r0
+ ldr r0, [sp, #172] @ 4-byte Reload
+ movne r0, r1
+ str r6, [r7, #28]
+ cmp r12, #0
+ str r0, [r7, #32]
+ ldr r0, [sp, #188] @ 4-byte Reload
+ movne r11, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ str r11, [r7, #36]
+ ldr r11, [sp, #176] @ 4-byte Reload
+ movne r11, r0
+ ldr r0, [sp, #116] @ 4-byte Reload
+ str r11, [r7, #40]
+ movne r9, r0
+ ldr r0, [sp, #120] @ 4-byte Reload
+ cmp r12, #0
+ str r9, [r7, #44]
+ movne r10, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ str r10, [r7, #48]
+ movne r8, r0
+ str r8, [r7, #52]
+ add sp, sp, #68
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end212:
+ .size mcl_fp_montRed14L, .Lfunc_end212-mcl_fp_montRed14L
+ .cantunwind
+ .fnend
+
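+@ NOTE (hand annotation, behaviour inferred from the code below):
+@ mcl_fp_addPre14L appears to add two 14-limb operands without modular
+@ reduction, storing the 14-word sum and returning the final carry in r0.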
+ .globl mcl_fp_addPre14L
+ .align 2
+ .type mcl_fp_addPre14L,%function
+mcl_fp_addPre14L: @ @mcl_fp_addPre14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ ldm r1, {r3, r12, lr}
+ ldr r9, [r1, #12]
+ ldmib r2, {r5, r6, r7}
+ ldr r11, [r2]
+ ldr r4, [r2, #16]
+ ldr r10, [r1, #44]
+ adds r8, r11, r3
+ ldr r3, [r2, #32]
+ str r4, [sp, #4] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ ldr r11, [r1, #48]
+ adcs r5, r5, r12
+ add r12, r1, #16
+ adcs r6, r6, lr
+ ldr lr, [r1, #40]
+ adcs r7, r7, r9
+ ldr r9, [r1, #52]
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [r2, #36]
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ str r3, [sp, #24] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r4, [sp, #12] @ 4-byte Spill
+ ldr r4, [r2, #28]
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r2, #44]
+ str r4, [sp, #20] @ 4-byte Spill
+ ldr r4, [r1, #32]
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2, #48]
+ ldr r2, [r2, #52]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #36]
+ str r3, [sp, #36] @ 4-byte Spill
+ str r2, [sp] @ 4-byte Spill
+ ldm r12, {r1, r2, r3, r12}
+ str r8, [r0]
+ stmib r0, {r5, r6}
+ str r7, [r0, #12]
+ ldr r5, [sp, #4] @ 4-byte Reload
+ ldr r7, [sp, #32] @ 4-byte Reload
+ ldr r6, [sp, #36] @ 4-byte Reload
+ adcs r1, r5, r1
+ ldr r5, [sp, #8] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r2, r5, r2
+ ldr r5, [sp, #40] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #20] @ 4-byte Reload
+ adcs r1, r1, r3
+ ldr r3, [sp] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r2, r2, r12
+ add r12, r0, #32
+ str r2, [r0, #28]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r4
+ adcs r2, r2, r3
+ ldr r3, [sp, #28] @ 4-byte Reload
+ adcs r3, r3, lr
+ adcs r7, r7, r10
+ adcs r6, r6, r11
+ stm r12, {r1, r2, r3, r7}
+ adcs r5, r5, r9
+ str r6, [r0, #48]
+ str r5, [r0, #52]
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #44
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end213:
+ .size mcl_fp_addPre14L, .Lfunc_end213-mcl_fp_addPre14L
+ .cantunwind
+ .fnend
+
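+@ NOTE (hand annotation, behaviour inferred from the code below):
+@ mcl_fp_subPre14L appears to subtract two 14-limb operands without
+@ reduction, storing the 14-word difference and returning the borrow
+@ (0 or 1) in r0.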
+ .globl mcl_fp_subPre14L
+ .align 2
+ .type mcl_fp_subPre14L,%function
+mcl_fp_subPre14L: @ @mcl_fp_subPre14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ ldmib r2, {r10, r11}
+ ldr r3, [r2, #16]
+ ldr r7, [r1]
+ ldr r6, [r2, #12]
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r2, #20]
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2, #24]
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [r2]
+ ldmib r1, {r4, r5, r12}
+ subs lr, r7, r3
+ ldr r3, [r2, #32]
+ sbcs r4, r4, r10
+ sbcs r5, r5, r11
+ add r11, r1, #32
+ sbcs r6, r12, r6
+ add r12, r1, #16
+ str r3, [sp, #4] @ 4-byte Spill
+ ldr r3, [r2, #36]
+ str r3, [sp, #8] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r3, [sp, #12] @ 4-byte Spill
+ ldr r3, [r2, #44]
+ str r3, [sp, #16] @ 4-byte Spill
+ ldr r3, [r2, #48]
+ ldr r2, [r2, #52]
+ str r3, [sp, #20] @ 4-byte Spill
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm r11, {r7, r10, r11}
+ ldr r2, [r1, #52]
+ ldr r8, [r1, #44]
+ ldr r9, [r1, #48]
+ str r2, [sp] @ 4-byte Spill
+ ldm r12, {r1, r2, r3, r12}
+ str lr, [r0]
+ stmib r0, {r4, r5}
+ str r6, [r0, #12]
+ ldr r5, [sp, #28] @ 4-byte Reload
+ ldr r6, [sp, #32] @ 4-byte Reload
+ ldr r4, [sp] @ 4-byte Reload
+ sbcs r1, r1, r5
+ ldr r5, [sp, #24] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ sbcs r2, r2, r6
+ ldr r6, [sp, #20] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #40] @ 4-byte Reload
+ sbcs r1, r3, r1
+ ldr r3, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #4] @ 4-byte Reload
+ sbcs r2, r12, r2
+ add r12, r0, #32
+ str r2, [r0, #28]
+ ldr r2, [sp, #8] @ 4-byte Reload
+ sbcs r1, r7, r1
+ ldr r7, [sp, #16] @ 4-byte Reload
+ sbcs r2, r10, r2
+ sbcs r3, r11, r3
+ sbcs r7, r8, r7
+ sbcs r6, r9, r6
+ stm r12, {r1, r2, r3, r7}
+ sbcs r5, r4, r5
+ str r6, [r0, #48]
+ str r5, [r0, #52]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #44
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end214:
+ .size mcl_fp_subPre14L, .Lfunc_end214-mcl_fp_subPre14L
+ .cantunwind
+ .fnend
+
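+@ NOTE (hand annotation, behaviour inferred from the code below):
+@ mcl_fp_shr1_14L appears to shift a 14-limb value right by one bit,
+@ propagating bits across limb boundaries via lsrs/rrx and orr ... lsl #31.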
+ .globl mcl_fp_shr1_14L
+ .align 2
+ .type mcl_fp_shr1_14L,%function
+mcl_fp_shr1_14L: @ @mcl_fp_shr1_14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #16
+ sub sp, sp, #16
+ add r9, r1, #8
+ add r12, r1, #32
+ ldm r9, {r2, r3, r4, r5, r6, r9}
+ ldm r1, {r7, lr}
+ str r7, [sp, #4] @ 4-byte Spill
+ lsr r7, lr, #1
+ orr r7, r7, r2, lsl #31
+ str r7, [sp] @ 4-byte Spill
+ ldm r12, {r7, r11, r12}
+ ldr r10, [r1, #48]
+ ldr r8, [r1, #44]
+ ldr r1, [r1, #52]
+ str r1, [sp, #12] @ 4-byte Spill
+ lsr r1, r3, #1
+ lsrs r3, r3, #1
+ str r10, [sp, #8] @ 4-byte Spill
+ rrx r2, r2
+ lsrs r3, lr, #1
+ orr r1, r1, r4, lsl #31
+ ldr r3, [sp, #4] @ 4-byte Reload
+ rrx r3, r3
+ str r3, [r0]
+ ldr r3, [sp] @ 4-byte Reload
+ str r3, [r0, #4]
+ str r2, [r0, #8]
+ str r1, [r0, #12]
+ lsrs r1, r5, #1
+ lsr r2, r11, #1
+ rrx r1, r4
+ ldr r4, [sp, #8] @ 4-byte Reload
+ orr r2, r2, r12, lsl #31
+ str r1, [r0, #16]
+ lsr r1, r5, #1
+ ldr r5, [sp, #12] @ 4-byte Reload
+ orr r1, r1, r6, lsl #31
+ str r1, [r0, #20]
+ lsrs r1, r9, #1
+ rrx r1, r6
+ str r1, [r0, #24]
+ lsr r1, r9, #1
+ orr r1, r1, r7, lsl #31
+ str r1, [r0, #28]
+ lsrs r1, r11, #1
+ rrx r1, r7
+ lsrs r3, r8, #1
+ lsr r7, r8, #1
+ rrx r3, r12
+ lsrs r6, r5, #1
+ orr r7, r7, r4, lsl #31
+ add r12, r0, #32
+ lsr r5, r5, #1
+ rrx r6, r4
+ stm r12, {r1, r2, r3, r7}
+ str r6, [r0, #48]
+ str r5, [r0, #52]
+ add sp, sp, #16
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end215:
+ .size mcl_fp_shr1_14L, .Lfunc_end215-mcl_fp_shr1_14L
+ .cantunwind
+ .fnend
+
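+@ NOTE (hand annotation, behaviour inferred from the code below):
+@ mcl_fp_add14L appears to compute a modular addition: it stores the raw
+@ 14-limb sum, performs a trial subtraction of the modulus (r3), and keeps
+@ the reduced value only when that subtraction does not borrow (the
+@ %nocarry / %carry blocks).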
+ .globl mcl_fp_add14L
+ .align 2
+ .type mcl_fp_add14L,%function
+mcl_fp_add14L: @ @mcl_fp_add14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #52
+ sub sp, sp, #52
+ ldr r9, [r1]
+ ldmib r1, {r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r7}
+ adds r9, r4, r9
+ ldr r4, [r1, #24]
+ adcs r10, r5, r8
+ ldr r5, [r1, #20]
+ str r9, [r0]
+ adcs r6, r6, lr
+ mov lr, r10
+ adcs r7, r7, r12
+ str r6, [sp, #32] @ 4-byte Spill
+ ldr r6, [r1, #16]
+ str lr, [r0, #4]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r2, #16]
+ adcs r7, r7, r6
+ ldr r6, [r2, #44]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ adcs r7, r7, r5
+ ldr r5, [r2, #28]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ ldr r10, [sp, #16] @ 4-byte Reload
+ adcs r7, r7, r4
+ ldr r4, [sp, #32] @ 4-byte Reload
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r1, #28]
+ str r4, [r0, #8]
+ adcs r7, r5, r7
+ ldr r5, [r2, #32]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ adcs r7, r5, r7
+ ldr r5, [r2, #36]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r1, #36]
+ adcs r11, r5, r7
+ ldr r7, [r1, #40]
+ ldr r5, [r2, #40]
+ str r11, [sp, #24] @ 4-byte Spill
+ adcs r8, r5, r7
+ ldr r7, [r1, #44]
+ ldr r5, [sp, #28] @ 4-byte Reload
+ str r8, [sp, #12] @ 4-byte Spill
+ adcs r12, r6, r7
+ ldr r7, [r1, #48]
+ ldr r6, [r2, #48]
+ ldr r1, [r1, #52]
+ ldr r2, [r2, #52]
+ str r5, [r0, #12]
+ str r12, [sp, #8] @ 4-byte Spill
+ adcs r6, r6, r7
+ adcs r2, r2, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r10, [r0, #20]
+ str r1, [r0, #24]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r1, [r0, #28]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r1, [r0, #32]
+ str r11, [r0, #36]
+ str r8, [r0, #40]
+ str r12, [r0, #44]
+ str r6, [r0, #48]
+ str r2, [r0, #52]
+ mov r8, r2
+ mov r2, #0
+ mov r12, r6
+ add r11, r3, #32
+ adc r1, r2, #0
+ str r1, [sp, #20] @ 4-byte Spill
+ ldm r3, {r6, r7}
+ ldr r1, [r3, #8]
+ ldr r2, [r3, #12]
+ subs r6, r9, r6
+ sbcs r7, lr, r7
+ str r6, [sp, #4] @ 4-byte Spill
+ sbcs r1, r4, r1
+ str r7, [sp] @ 4-byte Spill
+ str r1, [sp, #32] @ 4-byte Spill
+ sbcs r1, r5, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ sbcs r1, r2, r1
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ sbcs r10, r10, r1
+ ldr r1, [r3, #24]
+ sbcs r1, r2, r1
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ sbcs r5, r2, r1
+ ldm r11, {r1, r2, r6, r7, r11}
+ ldr r9, [r3, #52]
+ ldr r3, [sp, #40] @ 4-byte Reload
+ sbcs r3, r3, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ sbcs lr, r1, r2
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ sbcs r4, r1, r6
+ ldr r1, [sp, #8] @ 4-byte Reload
+ sbcs r7, r1, r7
+ sbcs r6, r12, r11
+ sbcs r1, r8, r9
+ sbc r2, r2, #0
+ tst r2, #1
+ bne .LBB216_2
+@ BB#1: @ %nocarry
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r2, [r0]
+ ldr r2, [sp] @ 4-byte Reload
+ str r2, [r0, #4]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r2, [r0, #8]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r2, [r0, #12]
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r2, [r0, #16]
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r10, [r0, #20]
+ str r2, [r0, #24]
+ str r5, [r0, #28]
+ str r3, [r0, #32]
+ str lr, [r0, #36]
+ str r4, [r0, #40]
+ str r7, [r0, #44]
+ str r6, [r0, #48]
+ str r1, [r0, #52]
+.LBB216_2: @ %carry
+ add sp, sp, #52
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end216:
+ .size mcl_fp_add14L, .Lfunc_end216-mcl_fp_add14L
+ .cantunwind
+ .fnend
+
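+@ NOTE (hand annotation, behaviour inferred from the code below):
+@ mcl_fp_addNF14L appears to be the "NF" (no final carry bit) variant of
+@ modular addition, selecting between the raw sum and sum-minus-modulus with
+@ movlt based on the sign of the trial subtraction.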
+ .globl mcl_fp_addNF14L
+ .align 2
+ .type mcl_fp_addNF14L,%function
+mcl_fp_addNF14L: @ @mcl_fp_addNF14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #80
+ sub sp, sp, #80
+ ldm r1, {r7, r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r10}
+ adds r4, r4, r7
+ ldr r7, [r2, #16]
+ adcs r5, r5, r8
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r1, #24]
+ adcs lr, r6, lr
+ ldr r6, [r1, #16]
+ str r5, [sp, #40] @ 4-byte Spill
+ ldr r5, [r1, #20]
+ adcs r9, r10, r12
+ str lr, [sp, #12] @ 4-byte Spill
+ str r9, [sp, #16] @ 4-byte Spill
+ adcs r7, r7, r6
+ ldr r6, [r2, #20]
+ str r7, [sp, #44] @ 4-byte Spill
+ adcs r7, r6, r5
+ ldr r6, [r2, #24]
+ ldr r5, [r2, #28]
+ str r7, [sp, #48] @ 4-byte Spill
+ adcs r8, r6, r4
+ ldr r6, [r1, #28]
+ str r8, [sp, #20] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r6, [r1, #32]
+ ldr r5, [r2, #32]
+ str r7, [sp, #52] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r6, [r1, #36]
+ ldr r5, [r2, #36]
+ str r7, [sp, #56] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r6, [r1, #40]
+ ldr r5, [r2, #40]
+ str r7, [sp, #68] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r6, [r1, #44]
+ ldr r5, [r2, #44]
+ str r7, [sp, #64] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r6, [r1, #48]
+ ldr r5, [r2, #48]
+ ldr r1, [r1, #52]
+ ldr r2, [r2, #52]
+ str r7, [sp, #60] @ 4-byte Spill
+ adcs r7, r5, r6
+ adc r1, r2, r1
+ str r7, [sp, #76] @ 4-byte Spill
+ str r1, [sp, #72] @ 4-byte Spill
+ ldmib r3, {r1, r4, r6}
+ ldr r2, [r3, #24]
+ ldr r7, [r3]
+ ldr r5, [r3, #16]
+ ldr r11, [r3, #20]
+ ldr r10, [r3, #40]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r3, #28]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [sp, #36] @ 4-byte Reload
+ subs r7, r2, r7
+ ldr r2, [sp, #40] @ 4-byte Reload
+ sbcs r2, r2, r1
+ ldr r1, [r3, #36]
+ sbcs r12, lr, r4
+ sbcs lr, r9, r6
+ ldr r9, [r3, #32]
+ ldr r6, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ sbcs r5, r1, r5
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r3, r1, r11
+ ldr r1, [sp, #28] @ 4-byte Reload
+ sbcs r4, r8, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ sbcs r8, r1, r6
+ ldr r1, [sp, #56] @ 4-byte Reload
+ ldr r6, [sp, #24] @ 4-byte Reload
+ sbcs r11, r1, r9
+ ldr r1, [sp, #68] @ 4-byte Reload
+ sbcs r9, r1, r6
+ ldr r1, [sp, #64] @ 4-byte Reload
+ ldr r6, [sp] @ 4-byte Reload
+ sbcs r1, r1, r10
+ ldr r10, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ sbcs r1, r1, r6
+ ldr r6, [sp, #4] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ sbcs r1, r1, r6
+ ldr r6, [sp, #8] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ sbc r6, r1, r6
+ asr r1, r6, #31
+ cmp r1, #0
+ movlt r7, r10
+ str r7, [r0]
+ ldr r7, [sp, #40] @ 4-byte Reload
+ movlt r2, r7
+ str r2, [r0, #4]
+ ldr r2, [sp, #12] @ 4-byte Reload
+ movlt r12, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ cmp r1, #0
+ str r12, [r0, #8]
+ movlt lr, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str lr, [r0, #12]
+ movlt r5, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r5, [r0, #16]
+ movlt r3, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #20]
+ ldr r3, [sp, #24] @ 4-byte Reload
+ movlt r4, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r4, [r0, #24]
+ movlt r8, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r8, [r0, #28]
+ movlt r11, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ cmp r1, #0
+ str r11, [r0, #32]
+ movlt r9, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r9, [r0, #36]
+ movlt r3, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r3, [r0, #40]
+ ldr r3, [sp, #28] @ 4-byte Reload
+ movlt r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r3, [r0, #44]
+ movlt r2, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r2, [r0, #48]
+ movlt r6, r1
+ str r6, [r0, #52]
+ add sp, sp, #80
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end217:
+ .size mcl_fp_addNF14L, .Lfunc_end217-mcl_fp_addNF14L
+ .cantunwind
+ .fnend
+
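+@ NOTE (hand annotation, behaviour inferred from the code below):
+@ mcl_fp_sub14L appears to compute a modular subtraction: it stores the raw
+@ 14-limb difference and, if a borrow occurred, adds the modulus (r3) back
+@ in the %carry block.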
+ .globl mcl_fp_sub14L
+ .align 2
+ .type mcl_fp_sub14L,%function
+mcl_fp_sub14L: @ @mcl_fp_sub14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #60
+ sub sp, sp, #60
+ ldr r9, [r2]
+ ldmib r2, {r8, lr}
+ ldr r5, [r1]
+ ldr r12, [r2, #12]
+ ldmib r1, {r4, r6, r7}
+ subs r5, r5, r9
+ sbcs r4, r4, r8
+ str r5, [sp, #52] @ 4-byte Spill
+ ldr r5, [r2, #24]
+ sbcs r6, r6, lr
+ str r4, [sp, #48] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ sbcs r7, r7, r12
+ str r6, [sp, #56] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ sbcs r8, r7, r6
+ ldr r7, [r1, #20]
+ ldr r6, [r1, #28]
+ str r8, [sp, #40] @ 4-byte Spill
+ sbcs r10, r7, r4
+ ldr r7, [r1, #24]
+ ldr r4, [r1, #40]
+ str r10, [sp, #36] @ 4-byte Spill
+ sbcs r9, r7, r5
+ ldr r7, [r2, #28]
+ sbcs r11, r6, r7
+ ldr r7, [r2, #32]
+ ldr r6, [r1, #32]
+ str r11, [sp, #32] @ 4-byte Spill
+ sbcs r12, r6, r7
+ ldr r7, [r2, #36]
+ ldr r6, [r1, #36]
+ str r12, [sp, #28] @ 4-byte Spill
+ sbcs r6, r6, r7
+ ldr r7, [r2, #40]
+ sbcs r5, r4, r7
+ ldr r7, [r2, #44]
+ ldr r4, [r1, #44]
+ str r5, [sp, #24] @ 4-byte Spill
+ sbcs lr, r4, r7
+ ldr r4, [r2, #48]
+ ldr r7, [r1, #48]
+ ldr r2, [r2, #52]
+ ldr r1, [r1, #52]
+ sbcs r7, r7, r4
+ ldr r4, [sp, #44] @ 4-byte Reload
+ sbcs r2, r1, r2
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r1, [r0]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r1, [r0, #4]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r1, [r0, #8]
+ str r4, [r0, #12]
+ str r8, [r0, #16]
+ mov r1, lr
+ add r8, r0, #24
+ str r10, [r0, #20]
+ stm r8, {r9, r11, r12}
+ str r6, [r0, #36]
+ str r5, [r0, #40]
+ str r1, [r0, #44]
+ str r7, [r0, #48]
+ mov r8, r2
+ str r2, [r0, #52]
+ mov r2, #0
+ sbc r2, r2, #0
+ tst r2, #1
+ beq .LBB218_2
+@ BB#1: @ %carry
+ ldr r2, [r3, #52]
+ ldr r5, [r3, #48]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [sp, #52] @ 4-byte Reload
+ ldr lr, [r3, #4]
+ ldr r12, [r3, #8]
+ ldr r10, [r3, #12]
+ ldr r11, [r3, #40]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r3, #16]
+ str r5, [sp, #52] @ 4-byte Spill
+ ldr r5, [sp, #48] @ 4-byte Reload
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r3, #20]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r3, #24]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r3, #28]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r3]
+ adds r2, r2, r7
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adcs lr, lr, r5
+ ldr r5, [r3, #44]
+ adcs r7, r12, r7
+ add r12, r0, #32
+ str r5, [sp, #48] @ 4-byte Spill
+ adcs r5, r10, r4
+ ldr r10, [r3, #36]
+ ldr r3, [r3, #32]
+ stm r0, {r2, lr}
+ str r7, [r0, #8]
+ ldr r2, [sp, #40] @ 4-byte Reload
+ ldr r7, [sp] @ 4-byte Reload
+ ldr r4, [sp, #36] @ 4-byte Reload
+ str r5, [r0, #12]
+ ldr r5, [sp, #52] @ 4-byte Reload
+ adcs r2, r7, r2
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r2, [r0, #16]
+ ldr r2, [sp, #8] @ 4-byte Reload
+ adcs r4, r7, r4
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r2, r2, r9
+ str r4, [r0, #20]
+ str r2, [r0, #24]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r2, r7, r2
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r2, [r0, #28]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r2, r3, r2
+ adcs r3, r10, r6
+ ldr r6, [sp, #48] @ 4-byte Reload
+ adcs r7, r11, r7
+ adcs r6, r6, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r5, r5, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ stm r12, {r2, r3, r7}
+ str r6, [r0, #44]
+ str r5, [r0, #48]
+ adc r1, r1, r8
+ str r1, [r0, #52]
+.LBB218_2: @ %nocarry
+ add sp, sp, #60
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end218:
+ .size mcl_fp_sub14L, .Lfunc_end218-mcl_fp_sub14L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF14L
+ .align 2
+ .type mcl_fp_subNF14L,%function
+mcl_fp_subNF14L: @ @mcl_fp_subNF14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #80
+ sub sp, sp, #80
+ mov r12, r0
+ ldr r0, [r2, #32]
+ add r7, r1, #16
+ ldr r9, [r2]
+ ldr r11, [r2, #20]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r2, #36]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r2, #40]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r2, #44]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r2, #48]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r2, #52]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [r2, #4]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r2, #8]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r2, #12]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r2, #16]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r2, #24]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r2, #28]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldmib r1, {r2, r8, lr}
+ ldm r7, {r4, r5, r6, r7}
+ ldr r10, [r1]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r1, [r1, #32]
+ subs r10, r10, r9
+ sbcs r9, r2, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r10, [sp] @ 4-byte Spill
+ str r9, [sp, #4] @ 4-byte Spill
+ sbcs r0, r8, r0
+ add r8, r3, #20
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ sbcs r0, lr, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ sbcs r0, r4, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ sbcs r0, r5, r11
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r6, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ sbcs r0, r7, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ sbcs r11, r1, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r11, [sp, #20] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ sbc r0, r1, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r3, {r2, r4, r6}
+ ldr r5, [r3, #12]
+ ldr lr, [r3, #16]
+ ldm r8, {r0, r7, r8}
+ ldr r3, [sp, #56] @ 4-byte Reload
+ adds r1, r10, r2
+ ldr r10, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r4, r9, r4
+ adcs r6, r10, r6
+ adcs r2, r2, r5
+ ldr r5, [sp, #60] @ 4-byte Reload
+ adcs r3, r3, lr
+ adcs lr, r5, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r5, r0, r7
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r7, [sp, #16] @ 4-byte Reload
+ adcs r8, r0, r8
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r9, r11, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r11, r0, r7
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r7, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r7, r0, r7
+ str r7, [sp, #36] @ 4-byte Spill
+ asr r7, r0, #31
+ ldr r0, [sp] @ 4-byte Reload
+ cmp r7, #0
+ movge r6, r10
+ movge r1, r0
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r1, [r12]
+ ldr r1, [sp, #24] @ 4-byte Reload
+ movge r4, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ cmp r7, #0
+ str r4, [r12, #4]
+ str r6, [r12, #8]
+ movge r2, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r2, [r12, #12]
+ movge r3, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ str r3, [r12, #16]
+ movge lr, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ cmp r7, #0
+ str lr, [r12, #20]
+ movge r5, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ str r5, [r12, #24]
+ movge r8, r0
+ ldr r0, [sp, #20] @ 4-byte Reload
+ str r8, [r12, #28]
+ movge r9, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ cmp r7, #0
+ str r9, [r12, #32]
+ movge r11, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r11, [r12, #36]
+ movge r1, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r1, [r12, #40]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ movge r1, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ cmp r7, #0
+ str r1, [r12, #44]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ movge r1, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ str r1, [r12, #48]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ movge r0, r1
+ str r0, [r12, #52]
+ add sp, sp, #80
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end219:
+ .size mcl_fp_subNF14L, .Lfunc_end219-mcl_fp_subNF14L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add14L
+ .align 2
+ .type mcl_fpDbl_add14L,%function
+mcl_fpDbl_add14L: @ @mcl_fpDbl_add14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #168
+ sub sp, sp, #168
+ ldr r7, [r1]
+ ldmib r1, {r6, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r8, r9}
+ add r10, r1, #32
+ adds r4, r4, r7
+ str r4, [sp, #92] @ 4-byte Spill
+ ldr r4, [r2, #96]
+ str r4, [sp, #152] @ 4-byte Spill
+ ldr r4, [r2, #100]
+ str r4, [sp, #156] @ 4-byte Spill
+ ldr r4, [r2, #104]
+ str r4, [sp, #160] @ 4-byte Spill
+ ldr r4, [r2, #108]
+ str r4, [sp, #164] @ 4-byte Spill
+ adcs r4, r5, r6
+ adcs r7, r8, lr
+ str r4, [sp, #68] @ 4-byte Spill
+ add lr, r1, #16
+ str r7, [sp, #64] @ 4-byte Spill
+ adcs r7, r9, r12
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #124] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #128] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #140] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #132] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #100] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r2, #16]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #96]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #104]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #108]
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r10}
+ ldr r2, [r1, #56]
+ ldr r8, [r1, #48]
+ ldr r9, [r1, #52]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #64] @ 4-byte Reload
+ str r7, [r0, #8]
+ ldr r7, [sp] @ 4-byte Reload
+ adcs r1, r7, r1
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r2, r7, r2
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r2, [r0, #20]
+ adcs r1, r1, r12
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r2, r2, lr
+ str r2, [r0, #28]
+ adcs r1, r1, r4
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r2, r2, r5
+ str r2, [r0, #36]
+ adcs r1, r1, r6
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r1, [r0, #40]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r2, r2, r10
+ str r2, [r0, #44]
+ adcs r1, r1, r8
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str r1, [r0, #48]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r2, r2, r9
+ adcs r6, r1, r7
+ str r2, [r0, #52]
+ ldr r1, [sp, #100] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r6, [sp, #84] @ 4-byte Spill
+ adcs r5, r1, r2
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r5, [sp, #88] @ 4-byte Spill
+ adcs r4, r1, r2
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r4, [sp, #96] @ 4-byte Spill
+ adcs r7, r1, r2
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r7, [sp, #100] @ 4-byte Spill
+ adcs lr, r1, r2
+ ldr r1, [sp, #120] @ 4-byte Reload
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str lr, [sp, #92] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #136] @ 4-byte Spill
+ ldr r1, [sp, #140] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #140] @ 4-byte Spill
+ ldr r1, [sp, #144] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #144] @ 4-byte Spill
+ ldr r1, [sp, #132] @ 4-byte Reload
+ adcs r8, r1, r2
+ ldr r1, [sp, #148] @ 4-byte Reload
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r8, [sp, #124] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r1, [sp, #148] @ 4-byte Spill
+ ldr r1, [sp, #152] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #152] @ 4-byte Spill
+ ldr r1, [sp, #156] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ str r1, [sp, #156] @ 4-byte Spill
+ ldr r1, [sp, #160] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r1, [sp, #160] @ 4-byte Spill
+ ldr r1, [sp, #164] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #164] @ 4-byte Spill
+ mov r1, #0
+ adc r1, r1, #0
+ str r1, [sp, #120] @ 4-byte Spill
+ ldmib r3, {r2, r12}
+ ldr r1, [r3, #16]
+ ldr r11, [r3]
+ ldr r9, [r3, #12]
+ ldr r10, [r3, #36]
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ subs r11, r6, r11
+ sbcs r2, r5, r2
+ sbcs r12, r4, r12
+ sbcs r4, r7, r9
+ ldr r7, [r3, #32]
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ ldr r6, [sp, #116] @ 4-byte Reload
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ ldr r5, [sp, #128] @ 4-byte Reload
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ sbcs r3, lr, r1
+ ldr r1, [sp, #136] @ 4-byte Reload
+ sbcs lr, r1, r6
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r6, [sp, #132] @ 4-byte Reload
+ sbcs r5, r1, r5
+ ldr r1, [sp, #144] @ 4-byte Reload
+ sbcs r6, r1, r6
+ ldr r1, [sp, #148] @ 4-byte Reload
+ sbcs r8, r8, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ sbcs r9, r1, r10
+ ldr r1, [sp, #152] @ 4-byte Reload
+ sbcs r10, r1, r7
+ ldr r1, [sp, #156] @ 4-byte Reload
+ ldr r7, [sp, #80] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #104] @ 4-byte Reload
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [sp, #160] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [sp, #164] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #84] @ 4-byte Reload
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [sp, #120] @ 4-byte Reload
+ sbc r1, r1, #0
+ ands r1, r1, #1
+ movne r11, r7
+ ldr r7, [sp, #88] @ 4-byte Reload
+ str r11, [r0, #56]
+ movne r2, r7
+ ldr r7, [sp, #116] @ 4-byte Reload
+ str r2, [r0, #60]
+ ldr r2, [sp, #96] @ 4-byte Reload
+ movne r12, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ cmp r1, #0
+ str r12, [r0, #64]
+ movne r4, r2
+ ldr r2, [sp, #92] @ 4-byte Reload
+ str r4, [r0, #68]
+ movne r3, r2
+ ldr r2, [sp, #136] @ 4-byte Reload
+ str r3, [r0, #72]
+ ldr r3, [sp, #128] @ 4-byte Reload
+ movne lr, r2
+ ldr r2, [sp, #140] @ 4-byte Reload
+ cmp r1, #0
+ str lr, [r0, #76]
+ movne r5, r2
+ ldr r2, [sp, #144] @ 4-byte Reload
+ str r5, [r0, #80]
+ movne r6, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ str r6, [r0, #84]
+ movne r8, r2
+ ldr r2, [sp, #148] @ 4-byte Reload
+ cmp r1, #0
+ str r8, [r0, #88]
+ movne r9, r2
+ ldr r2, [sp, #152] @ 4-byte Reload
+ str r9, [r0, #92]
+ movne r10, r2
+ ldr r2, [sp, #156] @ 4-byte Reload
+ str r10, [r0, #96]
+ movne r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #160] @ 4-byte Reload
+ ldr r2, [sp, #132] @ 4-byte Reload
+ str r3, [r0, #100]
+ movne r2, r1
+ ldr r1, [sp, #164] @ 4-byte Reload
+ str r2, [r0, #104]
+ movne r7, r1
+ str r7, [r0, #108]
+ add sp, sp, #168
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end220:
+ .size mcl_fpDbl_add14L, .Lfunc_end220-mcl_fpDbl_add14L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub14L
+ .align 2
+ .type mcl_fpDbl_sub14L,%function
+mcl_fpDbl_sub14L: @ @mcl_fpDbl_sub14L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #168
+ sub sp, sp, #168
+ ldr r7, [r2, #96]
+ add r9, r1, #32
+ str r7, [sp, #160] @ 4-byte Spill
+ ldr r7, [r2, #100]
+ str r7, [sp, #156] @ 4-byte Spill
+ ldr r7, [r2, #104]
+ str r7, [sp, #140] @ 4-byte Spill
+ ldr r7, [r2, #108]
+ str r7, [sp, #164] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #128] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #152] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #124] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #132] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #116] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #112] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #108] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #104] @ 4-byte Spill
+ ldm r2, {r5, r8, r12, lr}
+ ldr r6, [r1]
+ ldmib r1, {r4, r7, r10}
+ subs r5, r6, r5
+ sbcs r4, r4, r8
+ str r5, [sp, #32] @ 4-byte Spill
+ ldr r5, [r2, #44]
+ sbcs r7, r7, r12
+ str r4, [sp, #28] @ 4-byte Spill
+ ldr r4, [r2, #40]
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r5, [sp, #84] @ 4-byte Spill
+ str r4, [sp, #80] @ 4-byte Spill
+ str r7, [sp, #48] @ 4-byte Spill
+ sbcs r7, r10, lr
+ ldr r10, [r2, #16]
+ add lr, r1, #16
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r1, #96]
+ str r2, [sp, #88] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #92] @ 4-byte Spill
+ ldr r2, [r1, #104]
+ str r2, [sp, #96] @ 4-byte Spill
+ ldr r2, [r1, #108]
+ str r2, [sp, #100] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #76] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldm r9, {r4, r5, r6, r8, r9}
+ ldr r2, [r1, #52]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #32] @ 4-byte Reload
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #24] @ 4-byte Reload
+ sbcs r1, r1, r10
+ str r7, [r0, #8]
+ ldr r7, [sp, #20] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ sbcs r2, r2, r7
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #36] @ 4-byte Reload
+ sbcs r1, r12, r1
+ str r1, [r0, #24]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r2, lr, r2
+ add lr, r3, #8
+ str r2, [r0, #28]
+ ldr r2, [sp, #48] @ 4-byte Reload
+ sbcs r1, r4, r1
+ str r1, [r0, #32]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbcs r2, r5, r2
+ str r2, [r0, #36]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ sbcs r1, r6, r1
+ str r1, [r0, #40]
+ ldr r1, [sp, #104] @ 4-byte Reload
+ sbcs r2, r8, r2
+ str r2, [r0, #44]
+ ldr r2, [sp, #108] @ 4-byte Reload
+ sbcs r1, r9, r1
+ str r1, [r0, #48]
+ ldr r1, [sp, #112] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r2, [r0, #52]
+ ldr r2, [sp, #8] @ 4-byte Reload
+ sbcs r10, r7, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ str r10, [sp, #80] @ 4-byte Spill
+ sbcs r11, r2, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r11, [sp, #84] @ 4-byte Spill
+ sbcs r1, r2, r1
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #136] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #144] @ 4-byte Reload
+ str r1, [sp, #136] @ 4-byte Spill
+ mov r1, #0
+ sbcs r2, r7, r2
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r2, [sp, #128] @ 4-byte Spill
+ ldr r2, [sp, #120] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #64] @ 4-byte Reload
+ str r2, [sp, #144] @ 4-byte Spill
+ ldr r2, [sp, #148] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r2, [sp, #148] @ 4-byte Spill
+ ldr r2, [sp, #152] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #72] @ 4-byte Reload
+ str r2, [sp, #152] @ 4-byte Spill
+ ldr r2, [sp, #124] @ 4-byte Reload
+ sbcs r9, r7, r2
+ ldr r2, [sp, #132] @ 4-byte Reload
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r9, [sp, #108] @ 4-byte Spill
+ sbcs r2, r7, r2
+ ldr r7, [sp, #88] @ 4-byte Reload
+ str r2, [sp, #132] @ 4-byte Spill
+ ldr r2, [sp, #160] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #92] @ 4-byte Reload
+ str r2, [sp, #160] @ 4-byte Spill
+ ldr r2, [sp, #156] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #96] @ 4-byte Reload
+ str r2, [sp, #156] @ 4-byte Spill
+ ldr r2, [sp, #140] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #100] @ 4-byte Reload
+ str r2, [sp, #140] @ 4-byte Spill
+ ldr r2, [sp, #164] @ 4-byte Reload
+ sbcs r2, r7, r2
+ sbc r1, r1, #0
+ str r2, [sp, #164] @ 4-byte Spill
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [r3, #32]
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #124] @ 4-byte Spill
+ ldm r3, {r2, r5}
+ ldm lr, {r4, r6, lr}
+ ldr r7, [r3, #24]
+ ldr r8, [r3, #28]
+ ldr r12, [r3, #20]
+ ldr r3, [sp, #128] @ 4-byte Reload
+ adds r1, r10, r2
+ ldr r10, [sp, #104] @ 4-byte Reload
+ ldr r2, [sp, #136] @ 4-byte Reload
+ adcs r5, r11, r5
+ ldr r11, [sp, #124] @ 4-byte Reload
+ adcs r4, r10, r4
+ adcs r2, r2, r6
+ ldr r6, [sp, #144] @ 4-byte Reload
+ adcs r3, r3, lr
+ adcs r12, r6, r12
+ ldr r6, [sp, #148] @ 4-byte Reload
+ adcs lr, r6, r7
+ ldr r6, [sp, #152] @ 4-byte Reload
+ ldr r7, [sp, #132] @ 4-byte Reload
+ adcs r8, r6, r8
+ ldr r6, [sp, #92] @ 4-byte Reload
+ adcs r9, r9, r6
+ ldr r6, [sp, #96] @ 4-byte Reload
+ adcs r6, r7, r6
+ ldr r7, [sp, #160] @ 4-byte Reload
+ str r6, [sp, #96] @ 4-byte Spill
+ ldr r6, [sp, #112] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #116] @ 4-byte Reload
+ str r7, [sp, #112] @ 4-byte Spill
+ ldr r7, [sp, #156] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #116] @ 4-byte Spill
+ ldr r7, [sp, #140] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #80] @ 4-byte Reload
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [sp, #164] @ 4-byte Reload
+ adc r7, r7, r11
+ str r7, [sp, #124] @ 4-byte Spill
+ ldr r7, [sp, #100] @ 4-byte Reload
+ ands r7, r7, #1
+ moveq r1, r6
+ moveq r4, r10
+ ldr r6, [sp, #124] @ 4-byte Reload
+ str r1, [r0, #56]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ moveq r5, r1
+ ldr r1, [sp, #136] @ 4-byte Reload
+ cmp r7, #0
+ str r5, [r0, #60]
+ str r4, [r0, #64]
+ moveq r2, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r2, [r0, #68]
+ ldr r2, [sp, #96] @ 4-byte Reload
+ moveq r3, r1
+ ldr r1, [sp, #144] @ 4-byte Reload
+ str r3, [r0, #72]
+ ldr r3, [sp, #116] @ 4-byte Reload
+ moveq r12, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ cmp r7, #0
+ str r12, [r0, #76]
+ moveq lr, r1
+ ldr r1, [sp, #152] @ 4-byte Reload
+ str lr, [r0, #80]
+ moveq r8, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r8, [r0, #84]
+ moveq r9, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ cmp r7, #0
+ str r9, [r0, #88]
+ moveq r2, r1
+ ldr r1, [sp, #160] @ 4-byte Reload
+ str r2, [r0, #92]
+ ldr r2, [sp, #112] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #156] @ 4-byte Reload
+ moveq r3, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ cmp r7, #0
+ ldr r7, [sp, #120] @ 4-byte Reload
+ moveq r7, r1
+ ldr r1, [sp, #164] @ 4-byte Reload
+ moveq r6, r1
+ add r1, r0, #96
+ stm r1, {r2, r3, r7}
+ str r6, [r0, #108]
+ add sp, sp, #168
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end221:
+ .size mcl_fpDbl_sub14L, .Lfunc_end221-mcl_fpDbl_sub14L
+ .cantunwind
+ .fnend
+
+ .align 2
+ .type .LmulPv480x32,%function
+.LmulPv480x32: @ @mulPv480x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r3, [r1, #28]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #28]
+ ldr r3, [r1, #32]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #32]
+ ldr r3, [r1, #36]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #36]
+ ldr r3, [r1, #40]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #40]
+ ldr r3, [r1, #44]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #44]
+ ldr r3, [r1, #48]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #48]
+ ldr r3, [r1, #52]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #52]
+ ldr r1, [r1, #56]
+ umull r3, r7, r1, r2
+ adcs r1, r5, r3
+ str r1, [r0, #56]
+ adc r1, r7, #0
+ str r1, [r0, #60]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end222:
+ .size .LmulPv480x32, .Lfunc_end222-.LmulPv480x32
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre15L
+ .align 2
+ .type mcl_fp_mulUnitPre15L,%function
+mcl_fp_mulUnitPre15L: @ @mcl_fp_mulUnitPre15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #84
+ sub sp, sp, #84
+ mov r4, r0
+ add r0, sp, #16
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #76]
+ add r11, sp, #48
+ add lr, sp, #20
+ ldr r9, [sp, #64]
+ ldr r10, [sp, #60]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #72]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #68]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r6, r8, r11}
+ ldr r7, [sp, #44]
+ ldr r5, [sp, #40]
+ ldr r1, [sp, #16]
+ ldm lr, {r0, r2, r3, r12, lr}
+ str r1, [r4]
+ stmib r4, {r0, r2, r3, r12, lr}
+ add r0, r4, #32
+ str r5, [r4, #24]
+ str r7, [r4, #28]
+ stm r0, {r6, r8, r11}
+ str r10, [r4, #44]
+ str r9, [r4, #48]
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r0, [r4, #52]
+ ldr r0, [sp, #8] @ 4-byte Reload
+ str r0, [r4, #56]
+ ldr r0, [sp, #12] @ 4-byte Reload
+ str r0, [r4, #60]
+ add sp, sp, #84
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end223:
+ .size mcl_fp_mulUnitPre15L, .Lfunc_end223-mcl_fp_mulUnitPre15L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre15L
+ .align 2
+ .type mcl_fpDbl_mulPre15L,%function
+mcl_fpDbl_mulPre15L: @ @mcl_fpDbl_mulPre15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ .pad #1024
+ sub sp, sp, #1024
+ mov r3, r2
+ mov r4, r0
+ add r0, sp, #1000
+ str r1, [sp, #96] @ 4-byte Spill
+ mov r8, r1
+ ldr r2, [r3]
+ str r3, [sp, #92] @ 4-byte Spill
+ str r4, [sp, #100] @ 4-byte Spill
+ mov r6, r3
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1060]
+ ldr r1, [sp, #1004]
+ ldr r2, [r6, #4]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #1008]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #1012]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r1, [sp, #28] @ 4-byte Spill
+ mov r1, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1032]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1024]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1020]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1016]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1000]
+ str r0, [r4]
+ add r0, sp, #936
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #996]
+ add r10, sp, #960
+ add lr, sp, #936
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #992]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #988]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #984]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #980]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #24] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #4]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r4, r1, r0
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ ldr r2, [r6, #8]
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #872
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #932]
+ ldr r8, [sp, #872]
+ add r12, sp, #880
+ ldr lr, [sp, #912]
+ ldr r7, [sp, #908]
+ ldr r11, [sp, #904]
+ ldr r9, [sp, #900]
+ ldr r10, [sp, #876]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ adds r4, r8, r4
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #920]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #916]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r5, [sp, #100] @ 4-byte Reload
+ str r4, [r5, #8]
+ ldr r4, [sp, #52] @ 4-byte Reload
+ adcs r4, r10, r4
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r6, #12]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #808
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #868]
+ add r9, sp, #836
+ add lr, sp, #816
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #864]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #860]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #856]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #852]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r9, {r6, r7, r8, r9}
+ ldr r0, [sp, #808]
+ ldr r11, [sp, #812]
+ ldm lr, {r1, r2, r3, r12, lr}
+ ldr r10, [sp, #32] @ 4-byte Reload
+ adds r0, r0, r10
+ str r0, [r5, #12]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ ldr r5, [sp, #92] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #16]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r6, r0
+ mov r6, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #744
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #804]
+ add lr, sp, #768
+ add r12, sp, #748
+ ldr r11, [sp, #780]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #784]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r9, r10, lr}
+ ldr r8, [sp, #744]
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r4, [sp, #32] @ 4-byte Reload
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adds r4, r8, r4
+ str r4, [r7, #16]
+ ldr r4, [sp, #52] @ 4-byte Reload
+ mov r7, r5
+ adcs r4, r0, r4
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #20]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #680
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #740]
+ ldr r9, [sp, #680]
+ add lr, sp, #684
+ ldr r10, [sp, #720]
+ ldr r8, [sp, #716]
+ ldr r11, [sp, #712]
+ ldr r6, [sp, #708]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #736]
+ adds r4, r9, r4
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #732]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #728]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #724]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r5, [sp, #100] @ 4-byte Reload
+ str r4, [r5, #20]
+ ldr r4, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #24]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #616
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #676]
+ add r8, sp, #648
+ add lr, sp, #624
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #672]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #664]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #660]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r8, {r6, r7, r8}
+ ldr r10, [sp, #644]
+ ldr r0, [sp, #616]
+ ldr r11, [sp, #620]
+ ldm lr, {r1, r2, r3, r12, lr}
+ ldr r9, [sp, #32] @ 4-byte Reload
+ adds r0, r0, r9
+ str r0, [r5, #24]
+ ldr r0, [sp, #56] @ 4-byte Reload
+ ldr r5, [sp, #92] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #28]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #552
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #612]
+ add r11, sp, #584
+ add r12, sp, #556
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #608]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #604]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #600]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #596]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r6, r7, r11}
+ ldr lr, [sp, #580]
+ ldr r9, [sp, #576]
+ ldr r10, [sp, #552]
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r4, [sp, #32] @ 4-byte Reload
+ ldr r8, [sp, #100] @ 4-byte Reload
+ adds r4, r10, r4
+ str r4, [r8, #28]
+ ldr r4, [sp, #60] @ 4-byte Reload
+ adcs r4, r0, r4
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #32]
+ ldr r5, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #488
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #548]
+ ldr r9, [sp, #488]
+ add lr, sp, #492
+ mov r6, r8
+ ldr r10, [sp, #524]
+ ldr r11, [sp, #520]
+ ldr r7, [sp, #516]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #544]
+ adds r4, r9, r4
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #540]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #536]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #532]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #528]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ str r4, [r6, #32]
+ ldr r4, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r4, #36]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #424
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #484]
+ add r8, sp, #456
+ add lr, sp, #432
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #480]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #476]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #472]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #468]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r8, {r5, r7, r8}
+ ldr r10, [sp, #452]
+ ldr r0, [sp, #424]
+ ldr r11, [sp, #428]
+ ldm lr, {r1, r2, r3, r12, lr}
+ ldr r9, [sp, #32] @ 4-byte Reload
+ adds r0, r0, r9
+ str r0, [r6, #36]
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r4, #40]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r7, r0
+ mov r7, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #360
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #420]
+ add r12, sp, #364
+ ldr r11, [sp, #396]
+ ldr r6, [sp, #392]
+ ldr lr, [sp, #388]
+ ldr r9, [sp, #384]
+ ldr r10, [sp, #360]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #404]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #400]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r4, [sp, #32] @ 4-byte Reload
+ ldr r8, [sp, #100] @ 4-byte Reload
+ adds r4, r10, r4
+ str r4, [r8, #40]
+ ldr r4, [sp, #72] @ 4-byte Reload
+ adcs r4, r0, r4
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #44]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #296
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #356]
+ ldr r9, [sp, #296]
+ add lr, sp, #300
+ mov r5, r8
+ ldr r10, [sp, #336]
+ ldr r7, [sp, #332]
+ ldr r11, [sp, #328]
+ ldr r6, [sp, #324]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #352]
+ adds r4, r9, r4
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #348]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #344]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #340]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ str r4, [r5, #44]
+ ldr r4, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r4, #48]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #232
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #292]
+ add lr, sp, #240
+ ldr r8, [sp, #268]
+ ldr r7, [sp, #264]
+ ldr r10, [sp, #260]
+ ldr r3, [sp, #232]
+ ldr r11, [sp, #236]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #288]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #284]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #280]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #276]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r12, lr}
+ ldr r9, [sp, #28] @ 4-byte Reload
+ adds r3, r3, r9
+ add r9, sp, #168
+ str r3, [r5, #48]
+ ldr r3, [r4, #52]
+ ldr r4, [sp, #88] @ 4-byte Reload
+ adcs r4, r11, r4
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [sp, #84] @ 4-byte Reload
+ adcs r11, r0, r4
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r2, r0
+ mov r2, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ mov r0, r9
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #228]
+ add r12, sp, #172
+ ldr r6, [sp, #204]
+ ldr r4, [sp, #200]
+ ldr lr, [sp, #196]
+ ldr r8, [sp, #192]
+ ldr r9, [sp, #188]
+ ldr r2, [sp, #168]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #224]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #220]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #216]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #212]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #208]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r12, {r0, r1, r3, r12}
+ ldr r7, [sp, #32] @ 4-byte Reload
+ adds r2, r2, r7
+ str r2, [r5, #52]
+ adcs r5, r0, r11
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r2, [sp, #92] @ 4-byte Reload
+ adcs r7, r1, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r1, [sp, #4] @ 4-byte Reload
+ ldr r2, [r2, #56]
+ adcs r10, r3, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r11, r12, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, sp, #104
+ bl .LmulPv480x32(PLT)
+ add r3, sp, #104
+ add r12, sp, #120
+ ldm r3, {r0, r1, r2, r3}
+ adds r6, r0, r5
+ ldr r0, [sp, #164]
+ adcs lr, r1, r7
+ adcs r4, r2, r10
+ adcs r7, r3, r11
+ add r11, sp, #136
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #160]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #156]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldm r11, {r5, r8, r9, r10, r11}
+ ldm r12, {r1, r2, r3, r12}
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r6, [r0, #56]
+ ldr r6, [sp, #28] @ 4-byte Reload
+ str lr, [r0, #60]
+ str r4, [r0, #64]
+ str r7, [r0, #68]
+ ldr r7, [sp, #80] @ 4-byte Reload
+ ldr r4, [sp, #56] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r6, [r0, #72]
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [r0, #76]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r2, r3, r2
+ ldr r3, [sp, #84] @ 4-byte Reload
+ str r2, [r0, #80]
+ ldr r2, [sp, #68] @ 4-byte Reload
+ adcs r1, r12, r1
+ str r1, [r0, #84]
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r12, r5, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ ldr r5, [sp, #52] @ 4-byte Reload
+ adcs r1, r8, r1
+ str r12, [r0, #88]
+ add r12, r0, #92
+ adcs r2, r9, r2
+ adcs r3, r10, r3
+ adcs r7, r11, r7
+ adcs r6, r5, r6
+ ldr r5, [sp, #72] @ 4-byte Reload
+ adcs r5, r4, r5
+ ldr r4, [sp, #96] @ 4-byte Reload
+ stm r12, {r1, r2, r3, r7}
+ str r6, [r0, #108]
+ str r5, [r0, #112]
+ adc r4, r4, #0
+ str r4, [r0, #116]
+ add sp, sp, #44
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end224:
+ .size mcl_fpDbl_mulPre15L, .Lfunc_end224-mcl_fpDbl_mulPre15L
+ .cantunwind
+ .fnend
+
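+@ mcl_fpDbl_sqrPre15L(z, x): square a 15-limb (15 x 32-bit = 480-bit) operand x into a
+@ 30-limb double-width result z, with no modular reduction.  Each round below loads one
+@ limb x[i] into r2 and calls .LmulPv480x32 to form the 16-limb partial product x * x[i],
+@ which is folded into the running result through adcs carry chains.
+@ (Summary inferred from the call pattern in the generated code below.)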
+ .globl mcl_fpDbl_sqrPre15L
+ .align 2
+ .type mcl_fpDbl_sqrPre15L,%function
+mcl_fpDbl_sqrPre15L: @ @mcl_fpDbl_sqrPre15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #44
+ sub sp, sp, #44
+ .pad #1024
+ sub sp, sp, #1024
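+@ 1068-byte frame: fifteen 64-byte (16-word) .LmulPv480x32 result buffers plus spill
+@ slots in the low part of the frame (offsets inferred from the loads/stores below).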
+ mov r5, r1
+ mov r4, r0
+ add r0, sp, #1000
+ ldr r2, [r5]
+ str r4, [sp, #100] @ 4-byte Spill
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1060]
+ ldr r1, [sp, #1004]
+ ldr r2, [r5, #4]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #1008]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #1012]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r1, [sp, #36] @ 4-byte Spill
+ mov r1, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #1032]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1024]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1020]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1016]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1000]
+ str r0, [r4]
+ add r0, sp, #936
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #996]
+ add r10, sp, #960
+ add lr, sp, #936
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #992]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #988]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #984]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #980]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #32] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #4]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r4, r1, r0
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #8]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #872
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #932]
+ add r12, sp, #896
+ ldr lr, [sp, #912]
+ ldr r6, [sp, #908]
+ add r10, sp, #876
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #920]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #916]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r12, {r9, r11, r12}
+ ldr r8, [sp, #872]
+ ldm r10, {r0, r1, r2, r3, r10}
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adds r4, r8, r4
+ str r4, [r7, #8]
+ ldr r4, [sp, #60] @ 4-byte Reload
+ adcs r4, r0, r4
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #12]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #808
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #868]
+ add r10, sp, #836
+ add lr, sp, #812
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #864]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #860]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #856]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #852]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r6, r8, r9, r10}
+ ldr r11, [sp, #808]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r11, r4
+ ldr r4, [sp, #100] @ 4-byte Reload
+ str r7, [r4, #12]
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #16]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #744
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #804]
+ add r8, sp, #776
+ add lr, sp, #764
+ add r12, sp, #744
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #792]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r8, {r6, r7, r8}
+ ldm lr, {r9, r10, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r11, [sp, #40] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #16]
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #20]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #680
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #740]
+ add r8, sp, #712
+ add lr, sp, #684
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #736]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #732]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #728]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #724]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r8, {r4, r6, r8}
+ ldr r11, [sp, #708]
+ ldr r10, [sp, #680]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #40] @ 4-byte Reload
+ ldr r9, [sp, #100] @ 4-byte Reload
+ adds r7, r10, r7
+ str r7, [r9, #20]
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adcs r7, r0, r7
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #24]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #616
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #676]
+ add r10, sp, #644
+ add lr, sp, #620
+ mov r4, r9
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #672]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #668]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #664]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #660]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #656]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r6, r8, r10}
+ ldr r11, [sp, #616]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r11, r7
+ str r7, [r4, #24]
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #28]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #552
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #612]
+ add r8, sp, #584
+ add lr, sp, #572
+ add r12, sp, #552
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #608]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #604]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #600]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #596]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r8, {r6, r7, r8}
+ ldm lr, {r9, r10, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r11, [sp, #40] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #28]
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #32]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #488
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #548]
+ add r8, sp, #520
+ add lr, sp, #492
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #544]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #540]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #536]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #532]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r8, {r4, r6, r8}
+ ldr r11, [sp, #516]
+ ldr r10, [sp, #488]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #40] @ 4-byte Reload
+ ldr r9, [sp, #100] @ 4-byte Reload
+ adds r7, r10, r7
+ str r7, [r9, #32]
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adcs r7, r0, r7
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #36]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #424
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #484]
+ add r10, sp, #452
+ add lr, sp, #428
+ mov r4, r9
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #480]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #476]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #472]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #468]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #464]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r6, r8, r10}
+ ldr r11, [sp, #424]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r11, r7
+ str r7, [r4, #36]
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #40]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #360
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #420]
+ add r8, sp, #392
+ add lr, sp, #380
+ add r12, sp, #360
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #404]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r8, {r6, r7, r8}
+ ldm lr, {r9, r10, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r11, [sp, #40] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #40]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #44]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #296
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #356]
+ add r9, sp, #328
+ add lr, sp, #300
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #352]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #348]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #344]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #340]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r9, {r6, r8, r9}
+ ldr r11, [sp, #324]
+ ldr r10, [sp, #296]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adds r7, r10, r7
+ str r7, [r4, #44]
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adcs r7, r0, r7
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #48]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #232
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #292]
+ add r11, sp, #256
+ add lr, sp, #236
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #288]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #284]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #280]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #276]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r11, {r6, r8, r9, r10, r11}
+ ldr r12, [sp, #232]
+ ldm lr, {r0, r1, r2, r3, lr}
+ adds r7, r12, r7
+ ldr r12, [r5, #52]
+ str r7, [r4, #48]
+ ldr r7, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r7, r1, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r2, r0
+ mov r2, r12
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #168
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #228]
+ add lr, sp, #196
+ add r12, sp, #172
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #224]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #220]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #216]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #212]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #208]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm lr, {r8, r11, lr}
+ ldr r9, [sp, #192]
+ ldr r10, [sp, #188]
+ ldr r2, [sp, #168]
+ ldm r12, {r0, r1, r3, r12}
+ ldr r6, [sp, #40] @ 4-byte Reload
+ adds r2, r2, r6
+ add r6, sp, #104
+ str r2, [r4, #52]
+ adcs r4, r0, r7
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r2, [r5, #56]
+ adcs r0, r1, r0
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r7, r3, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ mov r0, r6
+ bl .LmulPv480x32(PLT)
+ add r3, sp, #104
+ add r11, sp, #136
+ add r12, sp, #120
+ ldm r3, {r0, r1, r2, r3}
+ adds r6, r0, r4
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs lr, r1, r0
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r4, r2, r7
+ adcs r7, r3, r0
+ ldr r0, [sp, #164]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #160]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #156]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldm r11, {r5, r8, r9, r10, r11}
+ ldm r12, {r1, r2, r3, r12}
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r6, [r0, #56]
+ ldr r6, [sp, #36] @ 4-byte Reload
+ str lr, [r0, #60]
+ str r4, [r0, #64]
+ str r7, [r0, #68]
+ ldr r7, [sp, #84] @ 4-byte Reload
+ ldr r4, [sp, #56] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r6, [r0, #72]
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r1, [r0, #76]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r2, r3, r2
+ ldr r3, [sp, #88] @ 4-byte Reload
+ str r2, [r0, #80]
+ ldr r2, [sp, #72] @ 4-byte Reload
+ adcs r1, r12, r1
+ str r1, [r0, #84]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r12, r5, r2
+ ldr r2, [sp, #92] @ 4-byte Reload
+ ldr r5, [sp, #52] @ 4-byte Reload
+ adcs r1, r8, r1
+ str r12, [r0, #88]
+ add r12, r0, #92
+ adcs r2, r9, r2
+ adcs r3, r10, r3
+ adcs r7, r11, r7
+ adcs r6, r5, r6
+ ldr r5, [sp, #76] @ 4-byte Reload
+ adcs r5, r4, r5
+ ldr r4, [sp, #60] @ 4-byte Reload
+ stm r12, {r1, r2, r3, r7}
+ str r6, [r0, #108]
+ str r5, [r0, #112]
+ adc r4, r4, #0
+ str r4, [r0, #116]
+ add sp, sp, #44
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end225:
+ .size mcl_fpDbl_sqrPre15L, .Lfunc_end225-mcl_fpDbl_sqrPre15L
+ .cantunwind
+ .fnend
+
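+@ mcl_fp_mont15L(z, x, y, p): Montgomery multiplication of 15-limb (480-bit) field
+@ elements, z = x*y*R^-1 mod p with R = 2^480.  Each of the 15 rounds below calls
+@ .LmulPv480x32 once to form x*y[i], multiplies the low accumulator word by the
+@ constant loaded from p[-1] (assumed, per the usual mcl layout, to hold -p^-1 mod 2^32)
+@ to obtain the reduction factor q, calls .LmulPv480x32 again to add q*p, and lets the
+@ now-zero low limb drop out of the carry chain.
+@ (Summary inferred from the surrounding generated code.)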
+ .globl mcl_fp_mont15L
+ .align 2
+ .type mcl_fp_mont15L,%function
+mcl_fp_mont15L: @ @mcl_fp_mont15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #12
+ sub sp, sp, #12
+ .pad #2048
+ sub sp, sp, #2048
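+@ ~2 KB frame: thirty 64-byte .LmulPv480x32 result buffers (two per round over fifteen
+@ rounds) plus spill slots, with offsets as inferred from the loads/stores below.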
+ add r12, sp, #124
+ add r7, sp, #1024
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #88] @ 4-byte Spill
+ add r0, r7, #968
+ ldr r6, [r3, #-4]
+ ldr r2, [r2]
+ str r6, [sp, #120] @ 4-byte Spill
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1996]
+ ldr r5, [sp, #1992]
+ add r7, sp, #1024
+ mov r1, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #2000]
+ mul r2, r5, r6
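+@ r2 = t[0] * p[-1] mod 2^32: the Montgomery reduction factor q for this round
+@ (r5 = low word of x*y[0], r6 = the constant loaded above from [r3, #-4]).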
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #2004]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #2052]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #2048]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #2044]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #2040]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #2036]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #2032]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #2028]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #2024]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #2020]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #2016]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #2012]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2008]
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, r7, #904
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1988]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r9, [sp, #1952]
+ ldr r6, [sp, #1948]
+ ldr r8, [sp, #1944]
+ ldr r4, [sp, #1928]
+ ldr r10, [sp, #1932]
+ ldr r11, [sp, #1936]
+ ldr r7, [sp, #1940]
+ add lr, sp, #1024
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1984]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1980]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1976]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1972]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1968]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1964]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1960]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1956]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, lr, #840
+ bl .LmulPv480x32(PLT)
+ adds r0, r4, r5
+ ldr r1, [sp, #64] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ ldr r3, [sp, #1880]
+ ldr r12, [sp, #1884]
+ ldr lr, [sp, #1888]
+ ldr r4, [sp, #1892]
+ ldr r5, [sp, #1896]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #1908]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r11, r0
+ ldr r11, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #1900]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #1864]
+ adcs r1, r9, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ ldr r9, [sp, #1904]
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #1876]
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #1872]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1924]
+ str r6, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1920]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1916]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1912]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1868]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1024
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r10, #776
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1860]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1828]
+ ldr r11, [sp, #1824]
+ ldr r8, [sp, #1820]
+ ldr r4, [sp, #1816]
+ ldr r5, [sp, #1800]
+ ldr r7, [sp, #1804]
+ ldr r9, [sp, #1808]
+ ldr r10, [sp, #1812]
+ add lr, sp, #1024
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1856]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1852]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1848]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1844]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1840]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1836]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1832]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, lr, #712
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1748]
+ ldr r3, [sp, #1752]
+ ldr r12, [sp, #1756]
+ ldr lr, [sp, #1760]
+ adds r0, r0, r5
+ ldr r5, [sp, #1768]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1776]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1780]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1764]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1772]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1736]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1744]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1796]
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1792]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1788]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1784]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1740]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1024
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r10, #648
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1732]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1700]
+ ldr r11, [sp, #1696]
+ ldr r8, [sp, #1692]
+ ldr r4, [sp, #1688]
+ ldr r5, [sp, #1672]
+ ldr r7, [sp, #1676]
+ ldr r9, [sp, #1680]
+ ldr r10, [sp, #1684]
+ add lr, sp, #1024
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1728]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1724]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1720]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1716]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1712]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1708]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1704]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, lr, #584
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1620]
+ ldr r3, [sp, #1624]
+ ldr r12, [sp, #1628]
+ ldr lr, [sp, #1632]
+ adds r0, r0, r5
+ ldr r5, [sp, #1640]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1648]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1652]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1636]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1644]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1608]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1616]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1668]
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1664]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1660]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1656]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1612]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1024
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r10, #520
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1604]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1572]
+ ldr r11, [sp, #1568]
+ ldr r8, [sp, #1564]
+ ldr r4, [sp, #1560]
+ ldr r5, [sp, #1544]
+ ldr r7, [sp, #1548]
+ ldr r9, [sp, #1552]
+ ldr r10, [sp, #1556]
+ add lr, sp, #1024
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1600]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1596]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1592]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1588]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1584]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1580]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1576]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, lr, #456
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1492]
+ ldr r3, [sp, #1496]
+ ldr r12, [sp, #1500]
+ ldr lr, [sp, #1504]
+ adds r0, r0, r5
+ ldr r5, [sp, #1512]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1520]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1524]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1508]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1516]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1480]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1488]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1540]
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1536]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1532]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1528]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1484]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1024
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r10, #392
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1476]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1444]
+ ldr r11, [sp, #1440]
+ ldr r8, [sp, #1436]
+ ldr r4, [sp, #1432]
+ ldr r5, [sp, #1416]
+ ldr r7, [sp, #1420]
+ ldr r9, [sp, #1424]
+ ldr r10, [sp, #1428]
+ add lr, sp, #1024
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1472]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1468]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1464]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1460]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1456]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1452]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1448]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, lr, #328
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1364]
+ ldr r3, [sp, #1368]
+ ldr r12, [sp, #1372]
+ ldr lr, [sp, #1376]
+ adds r0, r0, r5
+ ldr r5, [sp, #1384]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1392]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1396]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1380]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1388]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1352]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1360]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1412]
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1408]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1404]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1400]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1356]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1024
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r10, #264
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1348]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1316]
+ ldr r11, [sp, #1312]
+ ldr r8, [sp, #1308]
+ ldr r4, [sp, #1304]
+ ldr r5, [sp, #1288]
+ ldr r7, [sp, #1292]
+ ldr r9, [sp, #1296]
+ ldr r10, [sp, #1300]
+ add lr, sp, #1024
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1344]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1336]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1332]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1328]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1324]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1320]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, lr, #200
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1236]
+ ldr r3, [sp, #1240]
+ ldr r12, [sp, #1244]
+ ldr lr, [sp, #1248]
+ adds r0, r0, r5
+ ldr r5, [sp, #1256]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1264]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1268]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1252]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1260]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1224]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1232]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1284]
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1276]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1272]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1228]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1024
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r10, #136
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1220]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1188]
+ ldr r11, [sp, #1184]
+ ldr r8, [sp, #1180]
+ ldr r4, [sp, #1176]
+ ldr r5, [sp, #1160]
+ ldr r7, [sp, #1164]
+ ldr r9, [sp, #1168]
+ ldr r10, [sp, #1172]
+ add lr, sp, #1024
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1216]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1212]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1208]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1204]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1200]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, lr, #72
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1108]
+ ldr r3, [sp, #1112]
+ ldr r12, [sp, #1116]
+ ldr lr, [sp, #1120]
+ adds r0, r0, r5
+ ldr r5, [sp, #1128]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1136]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1140]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1124]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1132]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1096]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1104]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r6, r11, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1156]
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1152]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1100]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1024
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r10, #8
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1092]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1060]
+ ldr r11, [sp, #1056]
+ ldr r8, [sp, #1052]
+ ldr r4, [sp, #1048]
+ ldr r5, [sp, #1032]
+ ldr r7, [sp, #1036]
+ ldr r9, [sp, #1040]
+ ldr r10, [sp, #1044]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1088]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1084]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1080]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1076]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1072]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1068]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1064]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #968
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #972
+ adds r0, r0, r5
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #996
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1024]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1020]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1016]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #968]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #116] @ 4-byte Reload
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #904
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #964]
+ add r11, sp, #920
+ add r10, sp, #904
+ ldr r6, [sp, #932]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #960]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #956]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #952]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #948]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #944]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #940]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #936]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldm r10, {r5, r7, r9, r10}
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #840
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #844
+ adds r0, r0, r5
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #880
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ add r11, sp, #868
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #900]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #896]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldm r11, {r4, r5, r11}
+ ldr r6, [sp, #840]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #116] @ 4-byte Reload
+ adds r6, r7, r6
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r6, [sp, #32] @ 4-byte Spill
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ mul r2, r6, r11
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #776
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #836]
+ add r10, sp, #776
+ ldr r4, [sp, #800]
+ ldr r5, [sp, #796]
+ ldr r6, [sp, #792]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #832]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #828]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #824]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #820]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #816]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #812]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #712
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #112] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #728
+ adds r0, r0, r7
+ ldr r7, [sp, #724]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r8
+ adcs r1, r1, r9
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #752
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #716]
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp, #720]
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r4
+ ldr r4, [sp, #712]
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adc r1, r1, #0
+ adds r0, r0, r4
+ str r1, [sp, #52] @ 4-byte Spill
+ mul r1, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #772]
+ str r1, [sp, #44] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #768]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #112] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #112] @ 4-byte Spill
+ ldr r6, [sp, #108] @ 4-byte Reload
+ adcs r5, r6, r5
+ str r5, [sp, #108] @ 4-byte Spill
+ ldr r5, [sp, #104] @ 4-byte Reload
+ adcs r5, r5, r7
+ str r5, [sp, #104] @ 4-byte Spill
+ ldr r5, [sp, #100] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, sp, #648
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #708]
+ add r10, sp, #648
+ ldr r11, [sp, #676]
+ ldr r4, [sp, #672]
+ ldr r6, [sp, #668]
+ ldr r5, [sp, #664]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #688]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #684]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #680]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #584
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #116] @ 4-byte Reload
+ ldr r1, [sp, #108] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ add lr, sp, #600
+ adds r0, r0, r7
+ add r7, sp, #584
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r8
+ adcs r1, r1, r9
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #624
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #32] @ 4-byte Spill
+ ldm r7, {r4, r5, r6, r7}
+ adds r1, r0, r4
+ ldr r0, [sp, #120] @ 4-byte Reload
+ str r1, [sp, #116] @ 4-byte Spill
+ mul r2, r1, r0
+ ldr r0, [sp, #644]
+ str r2, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #640]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #112] @ 4-byte Reload
+ adcs r5, r11, r5
+ str r5, [sp, #64] @ 4-byte Spill
+ ldr r5, [sp, #108] @ 4-byte Reload
+ adcs r5, r5, r6
+ str r5, [sp, #60] @ 4-byte Spill
+ ldr r5, [sp, #104] @ 4-byte Reload
+ adcs r5, r5, r7
+ str r5, [sp, #56] @ 4-byte Spill
+ ldr r5, [sp, #100] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, sp, #520
+ bl .LmulPv480x32(PLT)
+ ldr r1, [sp, #580]
+ add r11, sp, #524
+ ldr r10, [sp, #548]
+ ldr r5, [sp, #544]
+ ldr r6, [sp, #540]
+ ldr r7, [sp, #520]
+ add r0, sp, #456
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #576]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #572]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #568]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #564]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #560]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #556]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #552]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r11, {r4, r8, r9, r11}
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #116] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #460
+ adds r0, r0, r7
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #484
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #516]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #512]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #504]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #456]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #116] @ 4-byte Reload
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #392
+ bl .LmulPv480x32(PLT)
+ ldr r1, [sp, #452]
+ ldr r6, [sp, #420]
+ ldr r7, [sp, #416]
+ ldr r9, [sp, #412]
+ ldr r4, [sp, #408]
+ ldr r10, [sp, #392]
+ ldr r11, [sp, #396]
+ ldr r8, [sp, #400]
+ ldr r5, [sp, #404]
+ add r0, sp, #328
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #448]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #444]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #440]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #436]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #432]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #428]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #424]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [r1, #52]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #332
+ adds r0, r0, r10
+ add r10, sp, #356
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #388]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #384]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #380]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #376]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #328]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #68] @ 4-byte Reload
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #264
+ bl .LmulPv480x32(PLT)
+ ldr r1, [sp, #324]
+ add r9, sp, #276
+ ldr r6, [sp, #292]
+ ldr r7, [sp, #288]
+ ldr r10, [sp, #264]
+ ldr r11, [sp, #268]
+ ldr r5, [sp, #272]
+ add r0, sp, #200
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #320]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #316]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #312]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #308]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #304]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #300]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #296]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r9, {r4, r8, r9}
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [r1, #56]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #64] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ add lr, sp, #216
+ adds r0, r0, r10
+ ldr r10, [sp, #212]
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r11
+ adcs r1, r1, r5
+ ldr r5, [sp, #208]
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r4
+ ldr r4, [sp, #200]
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r9
+ add r9, sp, #240
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #204]
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adc r1, r1, #0
+ adds r7, r0, r4
+ ldr r0, [sp, #120] @ 4-byte Reload
+ str r1, [sp, #72] @ 4-byte Spill
+ mul r1, r7, r0
+ ldr r0, [sp, #260]
+ str r1, [sp, #60] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #256]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #252]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldm r9, {r4, r8, r9}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #128] @ 4-byte Reload
+ adcs r11, r11, r6
+ ldr r6, [sp, #124] @ 4-byte Reload
+ adcs r5, r6, r5
+ ldr r6, [sp, #68] @ 4-byte Reload
+ adcs r10, r6, r10
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r8, r0, r8
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r6, r0, r9
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r9, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r9
+ str r0, [sp, #128] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ add r0, sp, #136
+ bl .LmulPv480x32(PLT)
+ add r3, sp, #136
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r7, r0
+ adcs r11, r11, r1
+ ldr r0, [sp, #152]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs lr, r5, r2
+ mov r5, r9
+ str r11, [sp, #44] @ 4-byte Spill
+ adcs r10, r10, r3
+ str lr, [sp, #52] @ 4-byte Spill
+ str r10, [sp, #60] @ 4-byte Spill
+ adcs r4, r1, r0
+ ldr r0, [sp, #156]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r4, [sp, #76] @ 4-byte Spill
+ adcs r12, r1, r0
+ ldr r0, [sp, #160]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r12, [sp, #56] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #164]
+ adcs r0, r1, r0
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #168]
+ adcs r0, r1, r0
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #172]
+ adcs r0, r1, r0
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #176]
+ adcs r0, r1, r0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #180]
+ adcs r0, r8, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #184]
+ adcs r0, r6, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ adcs r0, r1, r0
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ adcs r0, r1, r0
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #196]
+ adcs r0, r1, r0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldmib r5, {r1, r2}
+ ldr r3, [r5, #16]
+ ldr r7, [r5]
+ ldr r0, [r5, #12]
+ ldr r6, [r5, #20]
+ ldr r9, [r5, #24]
+ ldr r8, [r5, #32]
+ str r3, [sp, #80] @ 4-byte Spill
+ ldr r3, [r5, #28]
+ subs r7, r11, r7
+ add r11, r5, #36
+ str r3, [sp, #84] @ 4-byte Spill
+ sbcs r3, lr, r1
+ sbcs lr, r10, r2
+ ldm r11, {r1, r10, r11}
+ sbcs r4, r4, r0
+ ldr r0, [r5, #48]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r5, #52]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r5, #56]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ sbcs r2, r12, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ sbcs r12, r0, r6
+ ldr r0, [sp, #96] @ 4-byte Reload
+ ldr r6, [sp, #84] @ 4-byte Reload
+ sbcs r5, r0, r9
+ ldr r0, [sp, #100] @ 4-byte Reload
+ sbcs r6, r0, r6
+ ldr r0, [sp, #104] @ 4-byte Reload
+ sbcs r8, r0, r8
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs r9, r0, r1
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #64] @ 4-byte Reload
+ sbcs r10, r0, r10
+ ldr r0, [sp, #116] @ 4-byte Reload
+ sbcs r11, r0, r11
+ ldr r0, [sp, #120] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ sbcs r0, r0, r1
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ sbc r0, r0, #0
+ ands r1, r0, #1
+ ldr r0, [sp, #44] @ 4-byte Reload
+ movne r7, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r7, [r0]
+ ldr r7, [sp, #52] @ 4-byte Reload
+ movne r3, r7
+ str r3, [r0, #4]
+ ldr r3, [sp, #60] @ 4-byte Reload
+ movne lr, r3
+ ldr r3, [sp, #76] @ 4-byte Reload
+ cmp r1, #0
+ str lr, [r0, #8]
+ movne r4, r3
+ ldr r3, [sp, #56] @ 4-byte Reload
+ str r4, [r0, #12]
+ movne r2, r3
+ str r2, [r0, #16]
+ ldr r2, [sp, #92] @ 4-byte Reload
+ movne r12, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ cmp r1, #0
+ str r12, [r0, #20]
+ movne r5, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ str r5, [r0, #24]
+ movne r6, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r6, [r0, #28]
+ movne r8, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ cmp r1, #0
+ str r8, [r0, #32]
+ movne r9, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ str r9, [r0, #36]
+ movne r10, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r10, [r0, #40]
+ movne r11, r2
+ cmp r1, #0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r11, [r0, #44]
+ movne r2, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r2, [r0, #48]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r2, [r0, #52]
+ ldr r2, [sp, #132] @ 4-byte Reload
+ movne r2, r1
+ str r2, [r0, #56]
+ add sp, sp, #12
+ add sp, sp, #2048
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end226:
+ .size mcl_fp_mont15L, .Lfunc_end226-mcl_fp_mont15L
+ .cantunwind
+ .fnend
+
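+@ Note: the following routine, like mcl_fp_mont15L above, performs Montgomery
+@ multiplication on 15-limb operands (15 x 32-bit words = 480 bits). Each outer
+@ iteration calls .LmulPv480x32 twice: once to multiply the multiplicand by the
+@ next 32-bit word of the multiplier, and once for the reduction term, whose
+@ per-word quotient is formed by multiplying the running low word by the
+@ constant loaded via "ldr r5, [r3, #-4]" (presumably -p^-1 mod 2^32, stored
+@ just before the modulus, as in mcl's usual layout). The "NF" suffix is read
+@ here as mcl's variant for moduli that do not occupy the full bit width; that
+@ interpretation is an assumption and is not stated anywhere in this file.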
+ .globl mcl_fp_montNF15L
+ .align 2
+ .type mcl_fp_montNF15L,%function
+mcl_fp_montNF15L: @ @mcl_fp_montNF15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #4
+ sub sp, sp, #4
+ .pad #2048
+ sub sp, sp, #2048
+ add r12, sp, #116
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #76] @ 4-byte Spill
+ add r0, sp, #1984
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #112] @ 4-byte Spill
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1984]
+ ldr r1, [sp, #1988]
+ str r0, [sp, #60] @ 4-byte Spill
+ mul r2, r0, r5
+ ldr r0, [sp, #2044]
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #1992]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #2040]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #1996]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #2036]
+ str r1, [sp, #80] @ 4-byte Spill
+ mov r1, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #2032]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #2028]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #2024]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #2020]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #2016]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #2012]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2008]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2004]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2000]
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #1920
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1980]
+ add r7, sp, #1936
+ add r11, sp, #1920
+ ldr r6, [sp, #1948]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1976]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1972]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1968]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1964]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1960]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1956]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1952]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r7, {r4, r5, r7}
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r8, [sp, #1932]
+ ldr r2, [r0, #4]
+ add r0, sp, #1856
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #60] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1868]
+ ldr r3, [sp, #1872]
+ ldr r12, [sp, #1876]
+ ldr lr, [sp, #1880]
+ adds r0, r9, r0
+ ldr r9, [sp, #1896]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #1900]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r11, r0
+ ldr r11, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #1892]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #1884]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #1888]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #1856]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adc r0, r1, r0
+ adds r6, r11, r6
+ ldr r1, [sp, #1864]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1916]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1912]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1908]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1904]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1860]
+ adcs r0, r7, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #1792
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1852]
+ add r11, sp, #1808
+ add r10, sp, #1792
+ ldr r6, [sp, #1820]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1848]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1844]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1840]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1836]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1832]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1828]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1824]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldm r10, {r5, r7, r9, r10}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, sp, #1728
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1740]
+ ldr r3, [sp, #1744]
+ ldr r12, [sp, #1748]
+ ldr lr, [sp, #1752]
+ adds r0, r0, r5
+ ldr r5, [sp, #1760]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1768]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1772]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1756]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1764]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1728]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1736]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1788]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1784]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1780]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1776]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1732]
+ adcs r0, r7, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #1664
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1724]
+ add r11, sp, #1680
+ add r10, sp, #1664
+ ldr r6, [sp, #1692]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1720]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1716]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1712]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1708]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1704]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1700]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1696]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldm r10, {r5, r7, r9, r10}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #1600
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1612]
+ ldr r3, [sp, #1616]
+ ldr r12, [sp, #1620]
+ ldr lr, [sp, #1624]
+ adds r0, r0, r5
+ ldr r5, [sp, #1632]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1640]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1644]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1628]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1636]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1600]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1608]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1660]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1656]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1652]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1648]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1604]
+ adcs r0, r7, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #1536
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1596]
+ add r11, sp, #1552
+ add r10, sp, #1536
+ ldr r6, [sp, #1564]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1592]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1588]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1584]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1580]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1576]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1572]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1568]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldm r10, {r5, r7, r9, r10}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, sp, #1472
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1484]
+ ldr r3, [sp, #1488]
+ ldr r12, [sp, #1492]
+ ldr lr, [sp, #1496]
+ adds r0, r0, r5
+ ldr r5, [sp, #1504]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1512]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1516]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1500]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1508]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1472]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1480]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1532]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1528]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1524]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1520]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1476]
+ adcs r0, r7, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #1408
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1468]
+ add r11, sp, #1424
+ add r10, sp, #1408
+ ldr r6, [sp, #1436]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1464]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1460]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1456]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1452]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1448]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1444]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1440]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldm r10, {r5, r7, r9, r10}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #1344
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1356]
+ ldr r3, [sp, #1360]
+ ldr r12, [sp, #1364]
+ ldr lr, [sp, #1368]
+ adds r0, r0, r5
+ ldr r5, [sp, #1376]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1384]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1388]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1372]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1380]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1344]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1352]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1404]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1400]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1396]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1392]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1348]
+ adcs r0, r7, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #1280
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1340]
+ add r11, sp, #1296
+ add r10, sp, #1280
+ ldr r6, [sp, #1308]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1336]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1332]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1328]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1324]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1320]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1316]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1312]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldm r10, {r5, r7, r9, r10}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #1216
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1228]
+ ldr r3, [sp, #1232]
+ ldr r12, [sp, #1236]
+ ldr lr, [sp, #1240]
+ adds r0, r0, r5
+ ldr r5, [sp, #1248]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1256]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1260]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1244]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1252]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1216]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1224]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1276]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1272]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1268]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1220]
+ adcs r0, r7, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #1152
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1212]
+ add r11, sp, #1168
+ add r10, sp, #1152
+ ldr r6, [sp, #1180]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1208]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1204]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1200]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1188]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1184]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldm r10, {r5, r7, r9, r10}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #1088
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1100]
+ ldr r3, [sp, #1104]
+ ldr r12, [sp, #1108]
+ ldr lr, [sp, #1112]
+ adds r0, r0, r5
+ ldr r5, [sp, #1120]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1128]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1132]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1116]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1124]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1088]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1096]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1140]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1092]
+ adcs r0, r7, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #1024
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1084]
+ add r11, sp, #1040
+ add r10, sp, #1024
+ ldr r6, [sp, #1052]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1080]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1076]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1072]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1068]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1064]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1060]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldm r10, {r5, r7, r9, r10}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #960
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #964
+ adds r0, r0, r5
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #988
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1020]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1016]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1012]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1008]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #960]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #896
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #956]
+ add r11, sp, #912
+ add r10, sp, #896
+ ldr r6, [sp, #924]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #952]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #948]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #944]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #940]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #936]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #932]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r8, r11}
+ ldm r10, {r5, r7, r9, r10}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #832
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #836
+ adds r0, r0, r5
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #860
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #888]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #884]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #880]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #832]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r6, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, sp, #768
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #828]
+ add r11, sp, #768
+ ldr r6, [sp, #792]
+ ldr r5, [sp, #788]
+ ldr r8, [sp, #784]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #824]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #820]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #816]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #812]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r4, [sp, #780]
+ ldr r2, [r0, #40]
+ add r0, sp, #704
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #720
+ adds r0, r0, r9
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r2, r0, r10
+ ldr r0, [sp, #104] @ 4-byte Reload
+ add r10, sp, #744
+ adcs r0, r0, r11
+ ldr r11, [sp, #708]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #716]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #704]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #712]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #48] @ 4-byte Spill
+ adds r0, r2, r5
+ mul r1, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #764]
+ str r1, [sp, #40] @ 4-byte Spill
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #760]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #104] @ 4-byte Reload
+ adcs r7, r7, r11
+ str r7, [sp, #104] @ 4-byte Spill
+ ldr r7, [sp, #100] @ 4-byte Reload
+ adcs r6, r7, r6
+ str r6, [sp, #100] @ 4-byte Spill
+ ldr r6, [sp, #96] @ 4-byte Reload
+ adcs r6, r6, r8
+ str r6, [sp, #96] @ 4-byte Spill
+ ldr r6, [sp, #92] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #640
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #700]
+ add r7, sp, #656
+ add r11, sp, #640
+ ldr r4, [sp, #668]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #688]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #684]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #680]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #676]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #672]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r7, {r5, r6, r7}
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r1, [sp, #116] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #576
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #592
+ adds r0, r0, r8
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r2, r0, r9
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #616
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ add r7, sp, #576
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r7, {r4, r6, r7}
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r5, [sp, #588]
+ adds r1, r2, r4
+ mul r2, r1, r0
+ ldr r0, [sp, #636]
+ str r1, [sp, #108] @ 4-byte Spill
+ str r2, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #632]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #104] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #60] @ 4-byte Spill
+ ldr r6, [sp, #100] @ 4-byte Reload
+ adcs r6, r6, r7
+ str r6, [sp, #56] @ 4-byte Spill
+ ldr r6, [sp, #96] @ 4-byte Reload
+ adcs r5, r6, r5
+ str r5, [sp, #52] @ 4-byte Spill
+ ldr r5, [sp, #92] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ add r0, sp, #512
+ bl .LmulPv480x32(PLT)
+ ldr r1, [sp, #572]
+ add r11, sp, #520
+ ldr r8, [sp, #540]
+ ldr r9, [sp, #536]
+ ldr r10, [sp, #532]
+ ldr r6, [sp, #512]
+ ldr r7, [sp, #516]
+ add r0, sp, #448
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #568]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #564]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #560]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #556]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #552]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #548]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #544]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r5, r11}
+ ldr r1, [sp, #120] @ 4-byte Reload
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #116] @ 4-byte Reload
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #452
+ adds r0, r0, r6
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #476
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #504]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #500]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #496]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #448]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #108] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #384
+ bl .LmulPv480x32(PLT)
+ ldr r1, [sp, #444]
+ add r9, sp, #396
+ ldr r11, [sp, #412]
+ ldr r8, [sp, #408]
+ ldr r5, [sp, #384]
+ ldr r4, [sp, #388]
+ ldr r10, [sp, #392]
+ add r0, sp, #320
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #440]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #436]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #432]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #428]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #424]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #420]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #416]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r9, {r6, r7, r9}
+ ldr r1, [sp, #120] @ 4-byte Reload
+ ldr r2, [r1, #52]
+ ldr r1, [sp, #116] @ 4-byte Reload
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #324
+ adds r0, r0, r5
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #348
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #380]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #376]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #372]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #368]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #320]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #64] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #256
+ bl .LmulPv480x32(PLT)
+ ldr r1, [sp, #316]
+ add r11, sp, #260
+ ldr r8, [sp, #284]
+ ldr r9, [sp, #280]
+ ldr r10, [sp, #276]
+ ldr r7, [sp, #256]
+ add r0, sp, #192
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #312]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #308]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #304]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #300]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #296]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #292]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #288]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r11}
+ ldr r1, [sp, #120] @ 4-byte Reload
+ ldr r2, [r1, #56]
+ ldr r1, [sp, #116] @ 4-byte Reload
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #208
+ adds r0, r0, r7
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ adcs r1, r1, r5
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r1, r1, r10
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r1, r9
+ add r9, sp, #192
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adc r1, r1, r2
+ str r1, [sp, #68] @ 4-byte Spill
+ ldm r9, {r4, r8, r9}
+ ldr r7, [sp, #204]
+ ldr r10, [sp, #236]
+ adds r5, r0, r4
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r4, [sp, #232]
+ mul r1, r5, r0
+ ldr r0, [sp, #252]
+ str r1, [sp, #56] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #248]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #244]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #240]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #120] @ 4-byte Reload
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r8, r11, r8
+ adcs r9, r6, r9
+ ldr r6, [sp, #64] @ 4-byte Reload
+ adcs r7, r6, r7
+ ldr r6, [sp, #60] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r6, r0, r10
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r10, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ add r0, sp, #128
+ bl .LmulPv480x32(PLT)
+ add r3, sp, #128
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r5, r0
+ adcs r11, r8, r1
+ ldr r0, [sp, #144]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs lr, r9, r2
+ str r11, [sp, #40] @ 4-byte Spill
+ adcs r8, r7, r3
+ str lr, [sp, #48] @ 4-byte Spill
+ str r8, [sp, #56] @ 4-byte Spill
+ adcs r4, r1, r0
+ ldr r0, [sp, #148]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r4, [sp, #64] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #152]
+ adcs r0, r1, r0
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #156]
+ adcs r0, r1, r0
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #160]
+ adcs r0, r1, r0
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #164]
+ adcs r0, r1, r0
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #168]
+ adcs r0, r1, r0
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #172]
+ adcs r0, r6, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #176]
+ adcs r0, r1, r0
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #180]
+ adcs r0, r1, r0
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #184]
+ adcs r0, r1, r0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ adc r0, r1, r0
+ mov r1, r10
+ add r10, r1, #20
+ str r0, [sp, #120] @ 4-byte Spill
+ ldmib r1, {r0, r6}
+ ldr r2, [r1, #12]
+ ldr r12, [r1, #16]
+ ldm r10, {r5, r9, r10}
+ ldr r7, [r1]
+ subs r7, r11, r7
+ ldr r11, [r1, #36]
+ sbcs r3, lr, r0
+ ldr r0, [r1, #32]
+ sbcs lr, r8, r6
+ ldr r8, [r1, #40]
+ sbcs r4, r4, r2
+ ldr r2, [r1, #44]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ ldr r1, [r1, #56]
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r2, [sp, #52] @ 4-byte Spill
+ sbcs r2, r1, r12
+ ldr r1, [sp, #84] @ 4-byte Reload
+ sbcs r12, r1, r5
+ ldr r1, [sp, #88] @ 4-byte Reload
+ sbcs r5, r1, r9
+ ldr r1, [sp, #92] @ 4-byte Reload
+ sbcs r6, r1, r10
+ ldr r1, [sp, #96] @ 4-byte Reload
+ sbcs r9, r1, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ sbcs r10, r0, r11
+ ldr r0, [sp, #104] @ 4-byte Reload
+ sbcs r11, r0, r8
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ sbc r8, r0, r1
+ ldr r0, [sp, #40] @ 4-byte Reload
+ asr r1, r8, #31
+ cmp r1, #0
+ movlt r7, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r7, [r0]
+ ldr r7, [sp, #48] @ 4-byte Reload
+ movlt r3, r7
+ str r3, [r0, #4]
+ ldr r3, [sp, #56] @ 4-byte Reload
+ movlt lr, r3
+ ldr r3, [sp, #64] @ 4-byte Reload
+ cmp r1, #0
+ str lr, [r0, #8]
+ movlt r4, r3
+ ldr r3, [sp, #80] @ 4-byte Reload
+ str r4, [r0, #12]
+ movlt r2, r3
+ ldr r3, [sp, #68] @ 4-byte Reload
+ str r2, [r0, #16]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ movlt r12, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ cmp r1, #0
+ str r12, [r0, #20]
+ movlt r5, r2
+ ldr r2, [sp, #92] @ 4-byte Reload
+ str r5, [r0, #24]
+ movlt r6, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ str r6, [r0, #28]
+ movlt r9, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ cmp r1, #0
+ str r9, [r0, #32]
+ movlt r10, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r10, [r0, #36]
+ movlt r11, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ str r11, [r0, #40]
+ movlt r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #112] @ 4-byte Reload
+ ldr r2, [sp, #72] @ 4-byte Reload
+ str r3, [r0, #44]
+ movlt r2, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r2, [r0, #48]
+ ldr r2, [sp, #124] @ 4-byte Reload
+ movlt r2, r1
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r2, [r0, #52]
+ movlt r8, r1
+ str r8, [r0, #56]
+ add sp, sp, #4
+ add sp, sp, #2048
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end227:
+ .size mcl_fp_montNF15L, .Lfunc_end227-mcl_fp_montNF15L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed15L
+ .align 2
+ .type mcl_fp_montRed15L,%function
+mcl_fp_montRed15L: @ @mcl_fp_montRed15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #148
+ sub sp, sp, #148
+ .pad #1024
+ sub sp, sp, #1024
+ mov r3, r2
+ str r0, [sp, #192] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r7, [r1]
+ ldr r0, [r3]
+ str r3, [sp, #200] @ 4-byte Spill
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #188] @ 4-byte Spill
+ ldr r0, [r3, #4]
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #184] @ 4-byte Spill
+ ldr r0, [r3, #8]
+ str r2, [sp, #100] @ 4-byte Spill
+ str r0, [sp, #180] @ 4-byte Spill
+ ldr r0, [r3, #12]
+ str r0, [sp, #164] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ str r0, [sp, #168] @ 4-byte Spill
+ ldr r0, [r3, #20]
+ str r0, [sp, #172] @ 4-byte Spill
+ ldr r0, [r3, #24]
+ str r0, [sp, #176] @ 4-byte Spill
+ ldr r0, [r3, #-4]
+ str r0, [sp, #204] @ 4-byte Spill
+ mul r2, r7, r0
+ ldr r0, [r3, #28]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #144] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #148] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #152] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #156] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #160] @ 4-byte Spill
+ ldr r0, [r1, #96]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [r1, #100]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [r1, #104]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [r1, #108]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [r1, #112]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [r1, #116]
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #72]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r1, #76]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #80]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #88]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r1, #92]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r1, #84]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #20]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ mov r1, r3
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, sp, #1104
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1164]
+ ldr r9, [sp, #1104]
+ ldr r1, [sp, #1112]
+ ldr r2, [sp, #1116]
+ ldr r3, [sp, #1120]
+ ldr r12, [sp, #1124]
+ ldr lr, [sp, #1128]
+ ldr r4, [sp, #1132]
+ ldr r5, [sp, #1136]
+ ldr r6, [sp, #1140]
+ ldr r8, [sp, #1144]
+ ldr r10, [sp, #1148]
+ ldr r11, [sp, #1152]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1160]
+ adds r7, r7, r9
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1156]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1108]
+ adcs r9, r7, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #200] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ mul r2, r9, r0
+ add r0, sp, #1040
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1100]
+ ldr r4, [sp, #1040]
+ ldr r1, [sp, #1048]
+ ldr r2, [sp, #1052]
+ ldr r8, [sp, #1056]
+ ldr r3, [sp, #1060]
+ ldr r10, [sp, #1064]
+ ldr r11, [sp, #1068]
+ ldr r12, [sp, #1072]
+ ldr r7, [sp, #1076]
+ ldr r6, [sp, #1080]
+ ldr lr, [sp, #1084]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1096]
+ adds r4, r9, r4
+ ldr r4, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1092]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1088]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ adcs r9, r4, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r4, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r9, r4
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r11
+ mov r11, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ add r0, sp, #976
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #1036]
+ add lr, sp, #1000
+ add r10, sp, #976
+ ldr r5, [sp, #1020]
+ ldr r6, [sp, #1016]
+ ldr r7, [sp, #1012]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1032]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1024]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm lr, {r3, r12, lr}
+ ldr r9, [sp, #996]
+ ldr r2, [sp, #992]
+ ldm r10, {r0, r1, r8, r10}
+ adds r0, r11, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r1
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r4
+ ldr r1, [sp, #200] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, sp, #912
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #972]
+ ldr r4, [sp, #912]
+ add lr, sp, #916
+ ldr r11, [sp, #960]
+ ldr r5, [sp, #956]
+ ldr r6, [sp, #952]
+ ldr r7, [sp, #948]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #968]
+ adds r4, r8, r4
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #964]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r9, r10, r12, lr}
+ ldr r4, [sp, #108] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ str r4, [sp, #12] @ 4-byte Spill
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #200] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #204] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ mul r2, r4, r5
+ adcs r0, r0, r11
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #848
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #908]
+ add r10, sp, #872
+ add lr, sp, #848
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #904]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #900]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #896]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #12] @ 4-byte Reload
+ adds r0, r11, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mov r11, r1
+ adcs r0, r0, r2
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r5
+ mov r1, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #784
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #844]
+ ldr r4, [sp, #784]
+ add r10, sp, #788
+ ldr lr, [sp, #832]
+ ldr r5, [sp, #828]
+ ldr r6, [sp, #824]
+ ldr r7, [sp, #820]
+ ldr r12, [sp, #816]
+ ldr r3, [sp, #812]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #840]
+ adds r4, r11, r4
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #836]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r0, r1, r2, r8, r9, r10}
+ ldr r4, [sp, #108] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r4, [sp, #204] @ 4-byte Reload
+ str r11, [sp, #20] @ 4-byte Spill
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #200] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #720
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #780]
+ add r10, sp, #744
+ add lr, sp, #720
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #776]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #772]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #768]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #764]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #20] @ 4-byte Reload
+ adds r0, r11, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #104] @ 4-byte Reload
+ mov r11, r1
+ adcs r0, r0, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r4
+ mov r1, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #656
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #716]
+ ldr r4, [sp, #656]
+ add r10, sp, #660
+ ldr lr, [sp, #704]
+ ldr r5, [sp, #700]
+ ldr r6, [sp, #696]
+ ldr r7, [sp, #692]
+ ldr r12, [sp, #688]
+ ldr r3, [sp, #684]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #712]
+ adds r4, r11, r4
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r0, r1, r2, r8, r9, r10}
+ ldr r4, [sp, #108] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r4, [sp, #200] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ mul r2, r11, r0
+ add r0, sp, #592
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #652]
+ add r10, sp, #616
+ add lr, sp, #592
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #648]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #644]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #640]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #204] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mul r2, r11, r5
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #528
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #588]
+ ldr r4, [sp, #528]
+ add r10, sp, #532
+ ldr lr, [sp, #572]
+ ldr r6, [sp, #568]
+ ldr r7, [sp, #564]
+ ldr r12, [sp, #560]
+ ldr r3, [sp, #556]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ adds r4, r11, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #576]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r0, r1, r2, r8, r9, r10}
+ ldr r4, [sp, #108] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #200] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r9
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #464
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #524]
+ add r10, sp, #488
+ add lr, sp, #464
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #520]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #516]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #512]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r7, r8, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #204] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r11, r5
+ adcs r0, r0, r6
+ mov r6, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #400
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #460]
+ ldr r4, [sp, #400]
+ add r10, sp, #404
+ ldr lr, [sp, #440]
+ ldr r7, [sp, #436]
+ ldr r12, [sp, #432]
+ ldr r3, [sp, #428]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #456]
+ adds r4, r11, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #452]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #448]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #444]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r10, {r0, r1, r2, r8, r9, r10}
+ ldr r4, [sp, #108] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #196] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #336
+ bl .LmulPv480x32(PLT)
+ ldr r0, [sp, #396]
+ add r10, sp, #360
+ add lr, sp, #336
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #392]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #388]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #384]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r7, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #204] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ mul r2, r11, r6
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ add r0, sp, #272
+ bl .LmulPv480x32(PLT)
+ add r5, sp, #272
+ add lr, sp, #288
+ ldm r5, {r0, r1, r3, r5}
+ ldr r9, [sp, #332]
+ ldr r8, [sp, #328]
+ ldr r7, [sp, #312]
+ adds r0, r11, r0
+ ldr r11, [sp, #324]
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r10, r0, r1
+ mul r0, r10, r6
+ ldr r6, [sp, #316]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #320]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r4, [sp, #196] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #200] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r4, r0, r2
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r2, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r8, r0, r9
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ add r0, sp, #208
+ bl .LmulPv480x32(PLT)
+ add r3, sp, #208
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r10, r0
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs lr, r0, r1
+ ldr r0, [sp, #76] @ 4-byte Reload
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str lr, [sp, #80] @ 4-byte Spill
+ adcs r2, r0, r2
+ ldr r0, [sp, #48] @ 4-byte Reload
+ str r2, [sp, #84] @ 4-byte Spill
+ adcs r3, r0, r3
+ ldr r0, [sp, #224]
+ str r3, [sp, #88] @ 4-byte Spill
+ adcs r7, r1, r0
+ ldr r0, [sp, #228]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r7, [sp, #92] @ 4-byte Spill
+ adcs r4, r4, r0
+ ldr r0, [sp, #232]
+ str r4, [sp, #96] @ 4-byte Spill
+ adcs r5, r1, r0
+ ldr r0, [sp, #236]
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r5, [sp, #100] @ 4-byte Spill
+ adcs r6, r1, r0
+ ldr r0, [sp, #240]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r6, [sp, #104] @ 4-byte Spill
+ adcs r11, r1, r0
+ ldr r0, [sp, #244]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r11, [sp, #108] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #196] @ 4-byte Reload
+ str r0, [sp, #200] @ 4-byte Spill
+ ldr r0, [sp, #248]
+ adcs r0, r1, r0
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #252]
+ adcs r0, r1, r0
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #256]
+ adcs r10, r1, r0
+ ldr r0, [sp, #260]
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r10, [sp, #124] @ 4-byte Spill
+ adcs r9, r1, r0
+ ldr r0, [sp, #264]
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r9, [sp, #128] @ 4-byte Spill
+ adcs r8, r8, r0
+ ldr r0, [sp, #268]
+ adcs r12, r1, r0
+ ldr r0, [sp, #116] @ 4-byte Reload
+ ldr r1, [sp, #184] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #188] @ 4-byte Reload
+ subs r0, lr, r0
+ sbcs r1, r2, r1
+ ldr r2, [sp, #180] @ 4-byte Reload
+ sbcs r2, r3, r2
+ ldr r3, [sp, #164] @ 4-byte Reload
+ sbcs r3, r7, r3
+ ldr r7, [sp, #168] @ 4-byte Reload
+ sbcs lr, r4, r7
+ ldr r4, [sp, #172] @ 4-byte Reload
+ ldr r7, [sp, #136] @ 4-byte Reload
+ sbcs r4, r5, r4
+ ldr r5, [sp, #176] @ 4-byte Reload
+ sbcs r5, r6, r5
+ ldr r6, [sp, #132] @ 4-byte Reload
+ sbcs r6, r11, r6
+ ldr r11, [sp, #200] @ 4-byte Reload
+ str r6, [sp, #172] @ 4-byte Spill
+ sbcs r6, r11, r7
+ ldr r7, [sp, #140] @ 4-byte Reload
+ ldr r11, [sp, #204] @ 4-byte Reload
+ str r6, [sp, #176] @ 4-byte Spill
+ ldr r6, [sp, #196] @ 4-byte Reload
+ sbcs r6, r6, r7
+ ldr r7, [sp, #144] @ 4-byte Reload
+ str r6, [sp, #180] @ 4-byte Spill
+ sbcs r6, r11, r7
+ ldr r7, [sp, #148] @ 4-byte Reload
+ str r6, [sp, #184] @ 4-byte Spill
+ sbcs r6, r10, r7
+ ldr r7, [sp, #152] @ 4-byte Reload
+ mov r10, r8
+ str r6, [sp, #188] @ 4-byte Spill
+ sbcs r6, r9, r7
+ ldr r7, [sp, #156] @ 4-byte Reload
+ sbcs r11, r8, r7
+ ldr r7, [sp, #160] @ 4-byte Reload
+ mov r8, r12
+ sbcs r9, r12, r7
+ ldr r7, [sp, #120] @ 4-byte Reload
+ sbc r7, r7, #0
+ ands r12, r7, #1
+ ldr r7, [sp, #80] @ 4-byte Reload
+ movne r0, r7
+ ldr r7, [sp, #192] @ 4-byte Reload
+ str r0, [r7]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ str r1, [r7, #4]
+ ldr r1, [sp, #108] @ 4-byte Reload
+ movne r2, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ cmp r12, #0
+ str r2, [r7, #8]
+ movne r3, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r3, [r7, #12]
+ movne lr, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str lr, [r7, #16]
+ movne r4, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ cmp r12, #0
+ str r4, [r7, #20]
+ movne r5, r0
+ ldr r0, [sp, #172] @ 4-byte Reload
+ movne r0, r1
+ str r5, [r7, #24]
+ ldr r1, [sp, #176] @ 4-byte Reload
+ str r0, [r7, #28]
+ ldr r0, [sp, #200] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #196] @ 4-byte Reload
+ cmp r12, #0
+ str r1, [r7, #32]
+ ldr r1, [sp, #180] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #204] @ 4-byte Reload
+ str r1, [r7, #36]
+ ldr r1, [sp, #184] @ 4-byte Reload
+ movne r1, r0
+ ldr r0, [sp, #188] @ 4-byte Reload
+ str r1, [r7, #40]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ movne r0, r1
+ cmp r12, #0
+ str r0, [r7, #44]
+ ldr r0, [sp, #128] @ 4-byte Reload
+ movne r11, r10
+ movne r9, r8
+ movne r6, r0
+ str r6, [r7, #48]
+ str r11, [r7, #52]
+ str r9, [r7, #56]
+ add sp, sp, #148
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end228:
+ .size mcl_fp_montRed15L, .Lfunc_end228-mcl_fp_montRed15L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre15L
+ .align 2
+ .type mcl_fp_addPre15L,%function
+mcl_fp_addPre15L: @ @mcl_fp_addPre15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #52
+ sub sp, sp, #52
+ ldm r1, {r3, r7, r11}
+ ldr r10, [r2]
+ ldr r5, [r2, #16]
+ ldr r6, [r2, #4]
+ ldr r4, [r2, #8]
+ ldr r12, [r2, #12]
+ ldr r8, [r1, #12]
+ ldr r9, [r1, #56]
+ adds lr, r10, r3
+ ldr r3, [r2, #32]
+ str r5, [sp, #8] @ 4-byte Spill
+ ldr r5, [r2, #20]
+ ldr r10, [r1, #44]
+ adcs r6, r6, r7
+ adcs r4, r4, r11
+ ldr r11, [r1, #40]
+ adcs r7, r12, r8
+ add r12, r1, #16
+ ldr r8, [r1, #52]
+ str r3, [sp, #20] @ 4-byte Spill
+ ldr r3, [r2, #36]
+ str r5, [sp, #12] @ 4-byte Spill
+ ldr r5, [r2, #24]
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r5, [sp, #16] @ 4-byte Spill
+ ldr r5, [r2, #28]
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2, #44]
+ str r5, [sp, #24] @ 4-byte Spill
+ ldr r5, [r1, #32]
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [r2, #48]
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [r2, #52]
+ ldr r2, [r2, #56]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #36]
+ str r3, [sp, #44] @ 4-byte Spill
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm r12, {r1, r2, r3, r12}
+ str lr, [r0]
+ str r6, [r0, #4]
+ ldr r6, [sp, #8] @ 4-byte Reload
+ str r4, [r0, #8]
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ ldr r4, [sp, #48] @ 4-byte Reload
+ adcs r1, r6, r1
+ ldr r6, [sp, #40] @ 4-byte Reload
+ adcs r2, r7, r2
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r7, [sp, #36] @ 4-byte Reload
+ str r2, [r0, #20]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ adcs r1, r1, r3
+ ldr r3, [sp] @ 4-byte Reload
+ adcs r2, r2, r12
+ str r1, [r0, #24]
+ ldr r1, [sp, #20] @ 4-byte Reload
+ add r12, r0, #32
+ str r2, [r0, #28]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r1, r1, r5
+ ldr r5, [sp, #4] @ 4-byte Reload
+ adcs r2, r2, r3
+ ldr r3, [sp, #32] @ 4-byte Reload
+ adcs r3, r3, r11
+ adcs r7, r7, r10
+ adcs r6, r6, r5
+ ldr r5, [sp, #44] @ 4-byte Reload
+ stm r12, {r1, r2, r3, r7}
+ str r6, [r0, #48]
+ adcs r5, r5, r8
+ adcs r4, r4, r9
+ str r5, [r0, #52]
+ str r4, [r0, #56]
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #52
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end229:
+ .size mcl_fp_addPre15L, .Lfunc_end229-mcl_fp_addPre15L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subPre15L
+ .align 2
+ .type mcl_fp_subPre15L,%function
+mcl_fp_subPre15L: @ @mcl_fp_subPre15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #56
+ sub sp, sp, #56
+ ldm r2, {r3, r11}
+ ldr r7, [r1]
+ ldr r5, [r2, #8]
+ ldr r6, [r2, #12]
+ ldmib r1, {r4, r12, lr}
+ ldr r8, [r1, #32]
+ ldr r10, [r1, #52]
+ subs r3, r7, r3
+ ldr r7, [r2, #24]
+ str r3, [sp, #24] @ 4-byte Spill
+ ldr r3, [r2, #32]
+ sbcs r4, r4, r11
+ sbcs r5, r12, r5
+ add r12, r1, #16
+ sbcs r11, lr, r6
+ ldr r6, [r2, #20]
+ ldr lr, [r2, #16]
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r2, #36]
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [r2, #44]
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [r2, #48]
+ str r3, [sp, #44] @ 4-byte Spill
+ ldr r3, [r2, #52]
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [r2, #56]
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ ldr r2, [r1, #36]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r3, [sp, #20] @ 4-byte Spill
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldm r12, {r1, r2, r3, r12}
+ ldr r9, [sp, #24] @ 4-byte Reload
+ sbcs r1, r1, lr
+ str r9, [r0]
+ stmib r0, {r4, r5}
+ str r11, [r0, #12]
+ sbcs r2, r2, r6
+ str r1, [r0, #16]
+ ldr r6, [sp, #44] @ 4-byte Reload
+ ldr r5, [sp, #48] @ 4-byte Reload
+ ldr r4, [sp, #52] @ 4-byte Reload
+ sbcs r1, r3, r7
+ str r2, [r0, #20]
+ ldr r2, [sp, #20] @ 4-byte Reload
+ ldr r3, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp, #40] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ sbcs r2, r12, r2
+ sbcs r12, r8, r1
+ str r2, [r0, #28]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp] @ 4-byte Reload
+ str r12, [r0, #32]
+ sbcs r2, r1, r2
+ ldr r1, [sp, #4] @ 4-byte Reload
+ sbcs r3, r1, r3
+ ldr r1, [sp, #8] @ 4-byte Reload
+ sbcs r7, r1, r7
+ ldr r1, [sp, #12] @ 4-byte Reload
+ sbcs r6, r1, r6
+ ldr r1, [sp, #16] @ 4-byte Reload
+ sbcs r5, r10, r5
+ sbcs r4, r1, r4
+ add r1, r0, #36
+ stm r1, {r2, r3, r7}
+ str r6, [r0, #48]
+ str r5, [r0, #52]
+ str r4, [r0, #56]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #56
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end230:
+ .size mcl_fp_subPre15L, .Lfunc_end230-mcl_fp_subPre15L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_15L
+ .align 2
+ .type mcl_fp_shr1_15L,%function
+mcl_fp_shr1_15L: @ @mcl_fp_shr1_15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #24
+ sub sp, sp, #24
+ ldmib r1, {r2, r3, r4, r5, r6, r10}
+ ldr r7, [r1]
+ ldr r11, [r1, #52]
+ ldr r8, [r1, #28]
+ ldr lr, [r1, #32]
+ ldr r12, [r1, #36]
+ ldr r9, [r1, #44]
+ str r7, [sp, #4] @ 4-byte Spill
+ lsr r7, r2, #1
+ str r11, [sp, #16] @ 4-byte Spill
+ orr r7, r7, r3, lsl #31
+ str r7, [sp] @ 4-byte Spill
+ ldr r7, [r1, #40]
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [r1, #48]
+ ldr r1, [r1, #56]
+ str r1, [sp, #20] @ 4-byte Spill
+ lsr r1, r4, #1
+ lsrs r4, r4, #1
+ str r7, [sp, #12] @ 4-byte Spill
+ rrx r3, r3
+ lsrs r2, r2, #1
+ orr r1, r1, r5, lsl #31
+ ldr r2, [sp, #4] @ 4-byte Reload
+ rrx r2, r2
+ str r2, [r0]
+ ldr r2, [sp] @ 4-byte Reload
+ stmib r0, {r2, r3}
+ str r1, [r0, #12]
+ lsrs r1, r6, #1
+ lsr r2, r12, #1
+ rrx r1, r5
+ ldr r7, [sp, #8] @ 4-byte Reload
+ ldr r5, [sp, #16] @ 4-byte Reload
+ ldr r4, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ lsr r1, r6, #1
+ orr r1, r1, r10, lsl #31
+ str r1, [r0, #20]
+ lsrs r1, r8, #1
+ rrx r1, r10
+ orr r2, r2, r7, lsl #31
+ str r1, [r0, #24]
+ lsr r1, r8, #1
+ orr r1, r1, lr, lsl #31
+ str r1, [r0, #28]
+ lsrs r1, r12, #1
+ add r12, r0, #32
+ rrx r1, lr
+ lsrs r3, r9, #1
+ rrx r3, r7
+ lsrs r6, r5, #1
+ lsr r7, r9, #1
+ lsr r5, r5, #1
+ orr r7, r7, r4, lsl #31
+ rrx r6, r4
+ ldr r4, [sp, #20] @ 4-byte Reload
+ stm r12, {r1, r2, r3, r7}
+ str r6, [r0, #48]
+ orr r5, r5, r4, lsl #31
+ lsr r4, r4, #1
+ str r5, [r0, #52]
+ str r4, [r0, #56]
+ add sp, sp, #24
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end231:
+ .size mcl_fp_shr1_15L, .Lfunc_end231-mcl_fp_shr1_15L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add15L
+ .align 2
+ .type mcl_fp_add15L,%function
+mcl_fp_add15L: @ @mcl_fp_add15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #60
+ sub sp, sp, #60
+ ldr r9, [r1]
+ ldmib r1, {r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r7}
+ adds r10, r4, r9
+ ldr r4, [r1, #24]
+ adcs r11, r5, r8
+ ldr r5, [r1, #20]
+ mov r8, r10
+ adcs r6, r6, lr
+ mov lr, r11
+ str r8, [r0]
+ adcs r9, r7, r12
+ str r6, [sp, #40] @ 4-byte Spill
+ ldr r6, [r1, #16]
+ ldr r7, [r2, #16]
+ str lr, [r0, #4]
+ str r9, [sp, #8] @ 4-byte Spill
+ adcs r7, r7, r6
+ ldr r6, [r2, #48]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ adcs r7, r7, r5
+ ldr r5, [r2, #28]
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ ldr r10, [sp, #32] @ 4-byte Reload
+ adcs r7, r7, r4
+ ldr r4, [r2, #32]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r1, #28]
+ adcs r7, r5, r7
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ ldr r11, [sp, #12] @ 4-byte Reload
+ adcs r7, r4, r7
+ ldr r4, [r2, #36]
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r1, #36]
+ adcs r7, r4, r7
+ ldr r4, [r2, #40]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r1, #40]
+ adcs r7, r4, r7
+ ldr r4, [r2, #44]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r1, #44]
+ adcs r5, r4, r7
+ ldr r7, [r1, #48]
+ ldr r4, [sp, #40] @ 4-byte Reload
+ str r5, [sp, #28] @ 4-byte Spill
+ adcs r12, r6, r7
+ ldr r7, [r1, #52]
+ ldr r6, [r2, #52]
+ ldr r1, [r1, #56]
+ ldr r2, [r2, #56]
+ str r4, [r0, #8]
+ str r9, [r0, #12]
+ ldr r9, [sp, #36] @ 4-byte Reload
+ adcs r6, r6, r7
+ str r9, [r0, #16]
+ str r10, [r0, #20]
+ add r7, r0, #40
+ adcs r2, r2, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r6, [sp, #24] @ 4-byte Spill
+ str r2, [sp, #20] @ 4-byte Spill
+ str r1, [r0, #24]
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r11, [r0, #28]
+ str r1, [r0, #32]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r1, [r0, #36]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ stm r7, {r1, r5, r12}
+ str r6, [r0, #52]
+ str r2, [r0, #56]
+ mov r2, #0
+ adc r1, r2, #0
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r3, {r6, r7}
+ ldr r1, [r3, #8]
+ ldr r2, [r3, #12]
+ subs r5, r8, r6
+ sbcs r7, lr, r7
+ str r5, [sp, #4] @ 4-byte Spill
+ sbcs r1, r4, r1
+ str r7, [sp] @ 4-byte Spill
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #8] @ 4-byte Reload
+ sbcs r1, r1, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ sbcs r9, r9, r1
+ ldr r1, [r3, #20]
+ sbcs r1, r10, r1
+ add r10, r3, #32
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ sbcs r1, r2, r1
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ sbcs r11, r11, r1
+ ldm r10, {r1, r2, r6, r10}
+ ldr r5, [sp, #52] @ 4-byte Reload
+ ldr r8, [r3, #48]
+ ldr r7, [r3, #52]
+ ldr r3, [r3, #56]
+ sbcs r1, r5, r1
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ sbcs r4, r1, r2
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r2, r1, r6
+ ldr r1, [sp, #28] @ 4-byte Reload
+ sbcs lr, r1, r10
+ ldr r1, [sp, #24] @ 4-byte Reload
+ sbcs r6, r12, r8
+ sbcs r5, r1, r7
+ ldr r1, [sp, #20] @ 4-byte Reload
+ sbcs r1, r1, r3
+ ldr r3, [sp, #16] @ 4-byte Reload
+ sbc r3, r3, #0
+ tst r3, #1
+ bne .LBB232_2
+@ BB#1: @ %nocarry
+ ldr r3, [sp, #4] @ 4-byte Reload
+ str r3, [r0]
+ ldr r3, [sp] @ 4-byte Reload
+ str r3, [r0, #4]
+ ldr r3, [sp, #40] @ 4-byte Reload
+ str r3, [r0, #8]
+ ldr r3, [sp, #8] @ 4-byte Reload
+ str r3, [r0, #12]
+ ldr r3, [sp, #36] @ 4-byte Reload
+ str r9, [r0, #16]
+ str r3, [r0, #20]
+ ldr r3, [sp, #56] @ 4-byte Reload
+ str r3, [r0, #24]
+ ldr r3, [sp, #52] @ 4-byte Reload
+ str r11, [r0, #28]
+ str r3, [r0, #32]
+ str r4, [r0, #36]
+ str r2, [r0, #40]
+ str lr, [r0, #44]
+ str r6, [r0, #48]
+ str r5, [r0, #52]
+ str r1, [r0, #56]
+.LBB232_2: @ %carry
+ add sp, sp, #60
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end232:
+ .size mcl_fp_add15L, .Lfunc_end232-mcl_fp_add15L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF15L
+ .align 2
+ .type mcl_fp_addNF15L,%function
+mcl_fp_addNF15L: @ @mcl_fp_addNF15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #92
+ sub sp, sp, #92
+ ldr r9, [r1]
+ ldmib r1, {r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r7}
+ add r11, r3, #32
+ adds r10, r4, r9
+ ldr r4, [r1, #24]
+ adcs r9, r5, r8
+ ldr r5, [r1, #20]
+ str r10, [sp, #20] @ 4-byte Spill
+ adcs lr, r6, lr
+ ldr r6, [r1, #16]
+ str r9, [sp, #24] @ 4-byte Spill
+ adcs r8, r7, r12
+ ldr r7, [r2, #16]
+ str lr, [sp, #28] @ 4-byte Spill
+ str r8, [sp, #32] @ 4-byte Spill
+ adcs r7, r7, r6
+ ldr r6, [r2, #28]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ adcs r7, r7, r5
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ adcs r7, r7, r4
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r1, #28]
+ adcs r7, r6, r7
+ ldr r6, [r2, #32]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ adcs r7, r6, r7
+ ldr r6, [r2, #36]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r1, #36]
+ adcs r7, r6, r7
+ ldr r6, [r2, #40]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r1, #40]
+ adcs r7, r6, r7
+ ldr r6, [r2, #44]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r1, #44]
+ adcs r7, r6, r7
+ ldr r6, [r2, #48]
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r1, #48]
+ adcs r7, r6, r7
+ ldr r6, [r2, #52]
+ ldr r2, [r2, #56]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r1, #52]
+ ldr r1, [r1, #56]
+ adcs r7, r6, r7
+ adc r1, r2, r1
+ str r7, [sp, #84] @ 4-byte Spill
+ str r1, [sp, #80] @ 4-byte Spill
+ ldmib r3, {r1, r5, r7}
+ ldr r2, [r3, #16]
+ ldr r4, [r3]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r3, #20]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r3, #24]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r3, #28]
+ str r2, [sp, #44] @ 4-byte Spill
+ subs r2, r10, r4
+ sbcs r12, r9, r1
+ ldm r11, {r9, r10, r11}
+ ldr r1, [r3, #44]
+ ldr r4, [sp, #36] @ 4-byte Reload
+ sbcs lr, lr, r5
+ ldr r5, [sp, #64] @ 4-byte Reload
+ sbcs r6, r8, r7
+ ldr r7, [sp, #60] @ 4-byte Reload
+ str r1, [sp] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [r3, #56]
+ ldr r3, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r3, r1, r3
+ ldr r1, [sp, #52] @ 4-byte Reload
+ sbcs r4, r1, r4
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r5, r5, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ sbcs r8, r7, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ ldr r7, [sp] @ 4-byte Reload
+ sbcs r9, r1, r9
+ ldr r1, [sp, #76] @ 4-byte Reload
+ sbcs r10, r1, r10
+ ldr r1, [sp, #72] @ 4-byte Reload
+ sbcs r1, r1, r11
+ ldr r11, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #8] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbc r7, r1, r7
+ asr r1, r7, #31
+ cmp r1, #0
+ movlt r2, r11
+ str r2, [r0]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ movlt r12, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r12, [r0, #4]
+ movlt lr, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ cmp r1, #0
+ str lr, [r0, #8]
+ movlt r6, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r6, [r0, #12]
+ movlt r3, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r3, [r0, #16]
+ ldr r3, [sp, #16] @ 4-byte Reload
+ movlt r4, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ cmp r1, #0
+ str r4, [r0, #20]
+ movlt r5, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r5, [r0, #24]
+ movlt r8, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r8, [r0, #28]
+ movlt r9, r2
+ ldr r2, [sp, #76] @ 4-byte Reload
+ cmp r1, #0
+ str r9, [r0, #32]
+ movlt r10, r2
+ ldr r2, [sp, #72] @ 4-byte Reload
+ str r10, [r0, #36]
+ movlt r3, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r3, [r0, #40]
+ ldr r3, [sp, #36] @ 4-byte Reload
+ movlt r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #88] @ 4-byte Reload
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r3, [r0, #44]
+ movlt r2, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r2, [r0, #48]
+ ldr r2, [sp, #44] @ 4-byte Reload
+ movlt r2, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r2, [r0, #52]
+ movlt r7, r1
+ str r7, [r0, #56]
+ add sp, sp, #92
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end233:
+ .size mcl_fp_addNF15L, .Lfunc_end233-mcl_fp_addNF15L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sub15L
+ .align 2
+ .type mcl_fp_sub15L,%function
+mcl_fp_sub15L: @ @mcl_fp_sub15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #64
+ sub sp, sp, #64
+ ldr r9, [r2]
+ ldmib r2, {r8, lr}
+ ldr r5, [r1]
+ ldr r12, [r2, #12]
+ ldmib r1, {r4, r6, r7}
+ subs r5, r5, r9
+ sbcs r4, r4, r8
+ str r5, [sp, #48] @ 4-byte Spill
+ ldr r5, [r2, #24]
+ sbcs r6, r6, lr
+ str r4, [sp, #60] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ sbcs r7, r7, r12
+ str r6, [sp, #56] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ sbcs r9, r7, r6
+ ldr r7, [r1, #20]
+ ldr r6, [r1, #28]
+ str r9, [sp, #40] @ 4-byte Spill
+ sbcs r7, r7, r4
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r1, #24]
+ sbcs r5, r7, r5
+ ldr r7, [r2, #28]
+ sbcs r10, r6, r7
+ ldr r7, [r2, #32]
+ ldr r6, [r1, #32]
+ str r10, [sp, #36] @ 4-byte Spill
+ sbcs r11, r6, r7
+ ldr r7, [r2, #36]
+ ldr r6, [r1, #36]
+ str r11, [sp, #32] @ 4-byte Spill
+ sbcs lr, r6, r7
+ ldr r7, [r2, #40]
+ ldr r6, [r1, #40]
+ str lr, [sp, #28] @ 4-byte Spill
+ sbcs r12, r6, r7
+ ldr r7, [r2, #44]
+ ldr r6, [r1, #44]
+ str r12, [sp, #24] @ 4-byte Spill
+ sbcs r4, r6, r7
+ ldr r6, [r2, #48]
+ ldr r7, [r1, #48]
+ sbcs r8, r7, r6
+ ldr r6, [r2, #52]
+ ldr r7, [r1, #52]
+ ldr r2, [r2, #56]
+ ldr r1, [r1, #56]
+ sbcs r6, r7, r6
+ ldr r7, [sp, #48] @ 4-byte Reload
+ sbcs r2, r1, r2
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r2, [sp, #20] @ 4-byte Spill
+ str r7, [r0]
+ str r1, [r0, #4]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r1, [r0, #8]
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r1, [r0, #12]
+ str r9, [r0, #16]
+ mov r9, r6
+ mov r6, r5
+ ldr r5, [sp, #44] @ 4-byte Reload
+ mov r1, r4
+ str r5, [r0, #20]
+ str r6, [r0, #24]
+ str r10, [r0, #28]
+ str r11, [r0, #32]
+ str lr, [r0, #36]
+ str r12, [r0, #40]
+ add r12, r0, #44
+ stm r12, {r1, r8, r9}
+ str r2, [r0, #56]
+ mov r2, #0
+ sbc r2, r2, #0
+ tst r2, #1
+ beq .LBB234_2
+@ BB#1: @ %carry
+ ldr r2, [r3, #56]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldmib r3, {r2, lr}
+ ldr r4, [r3, #16]
+ ldr r12, [r3, #12]
+ str r4, [sp] @ 4-byte Spill
+ ldr r4, [r3, #20]
+ str r4, [sp, #4] @ 4-byte Spill
+ ldr r4, [r3, #24]
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [r3, #28]
+ str r4, [sp, #12] @ 4-byte Spill
+ ldr r4, [r3]
+ adds r4, r4, r7
+ ldr r7, [r3, #52]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adcs r11, r2, r7
+ ldr r2, [r3, #48]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [sp, #56] @ 4-byte Reload
+ adcs r7, lr, r2
+ ldr r2, [r3, #44]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [sp, #52] @ 4-byte Reload
+ adcs r2, r12, r2
+ add r12, r3, #32
+ ldm r12, {r3, r10, r12}
+ stm r0, {r4, r11}
+ str r7, [r0, #8]
+ str r2, [r0, #12]
+ ldr r7, [sp, #40] @ 4-byte Reload
+ ldr r4, [sp] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ adcs r4, r4, r7
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r2, r2, r5
+ str r4, [r0, #16]
+ str r2, [r0, #20]
+ ldr r2, [sp, #36] @ 4-byte Reload
+ adcs r4, r7, r6
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r4, [r0, #24]
+ adcs r2, r7, r2
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r2, [r0, #28]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs lr, r3, r2
+ ldr r3, [sp, #28] @ 4-byte Reload
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str lr, [r0, #32]
+ adcs r3, r10, r3
+ adcs r7, r12, r7
+ str r3, [r0, #36]
+ adcs r6, r2, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r7, [r0, #40]
+ str r6, [r0, #44]
+ adcs r5, r1, r8
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r5, [r0, #48]
+ adcs r4, r1, r9
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r4, [r0, #52]
+ adc r1, r2, r1
+ str r1, [r0, #56]
+.LBB234_2: @ %nocarry
+ add sp, sp, #64
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end234:
+ .size mcl_fp_sub15L, .Lfunc_end234-mcl_fp_sub15L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF15L
+ .align 2
+ .type mcl_fp_subNF15L,%function
+mcl_fp_subNF15L: @ @mcl_fp_subNF15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #84
+ sub sp, sp, #84
+ mov r12, r0
+ ldr r0, [r2, #32]
+ add r9, r2, #8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r2, #36]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r2, #40]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r2, #44]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r2, #48]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r2, #52]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r2, #56]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r2, {r10, r11}
+ ldm r9, {r5, r6, r7, r9}
+ ldr r0, [r2, #28]
+ ldr r8, [r2, #24]
+ ldr r2, [r1]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldmib r1, {r0, lr}
+ ldr r4, [r1, #12]
+ subs r2, r2, r10
+ add r10, r3, #12
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #36]
+ sbcs r11, r0, r11
+ ldr r0, [r1, #32]
+ sbcs lr, lr, r5
+ ldr r5, [r1, #28]
+ str r11, [sp] @ 4-byte Spill
+ sbcs r6, r4, r6
+ str r6, [sp, #48] @ 4-byte Spill
+ ldr r6, [r1, #16]
+ sbcs r7, r6, r7
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r1, #24]
+ ldr r1, [r1, #20]
+ sbcs r1, r1, r9
+ str r1, [sp, #52] @ 4-byte Spill
+ sbcs r1, r7, r8
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ sbcs r1, r5, r1
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ sbcs r0, r2, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ sbc r0, r1, r0
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r3, {r2, r5, r7}
+ ldm r10, {r6, r9, r10}
+ ldr r8, [sp, #8] @ 4-byte Reload
+ ldr r4, [sp, #48] @ 4-byte Reload
+ ldr r0, [r3, #28]
+ ldr r1, [r3, #24]
+ adds r2, r8, r2
+ adcs r3, r11, r5
+ mov r11, lr
+ ldr r5, [sp, #56] @ 4-byte Reload
+ adcs lr, r11, r7
+ ldr r7, [sp, #60] @ 4-byte Reload
+ adcs r4, r4, r6
+ ldr r6, [sp, #52] @ 4-byte Reload
+ adcs r5, r5, r9
+ adcs r6, r6, r10
+ adcs r7, r7, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r9, r1, r0
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r10, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r1, r0, r1
+ str r1, [sp, #32] @ 4-byte Spill
+ asr r1, r0, #31
+ ldr r0, [sp] @ 4-byte Reload
+ cmp r1, #0
+ movge r2, r8
+ movge lr, r11
+ str r2, [r12]
+ ldr r2, [sp, #12] @ 4-byte Reload
+ movge r3, r0
+ ldr r0, [sp, #48] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r12, #4]
+ str lr, [r12, #8]
+ movge r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r4, [r12, #12]
+ movge r5, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r5, [r12, #16]
+ movge r6, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ cmp r1, #0
+ str r6, [r12, #20]
+ movge r7, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r7, [r12, #24]
+ movge r9, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r9, [r12, #28]
+ movge r10, r0
+ ldr r0, [sp, #44] @ 4-byte Reload
+ cmp r1, #0
+ str r10, [r12, #32]
+ movge r2, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r2, [r12, #36]
+ ldr r2, [sp, #16] @ 4-byte Reload
+ movge r2, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ str r2, [r12, #40]
+ ldr r2, [sp, #20] @ 4-byte Reload
+ movge r2, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ cmp r1, #0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r2, [r12, #44]
+ movge r1, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ str r1, [r12, #48]
+ ldr r1, [sp, #28] @ 4-byte Reload
+ movge r1, r0
+ ldr r0, [sp, #32] @ 4-byte Reload
+ str r1, [r12, #52]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ movge r0, r1
+ str r0, [r12, #56]
+ add sp, sp, #84
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end235:
+ .size mcl_fp_subNF15L, .Lfunc_end235-mcl_fp_subNF15L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add15L
+ .align 2
+ .type mcl_fpDbl_add15L,%function
+mcl_fpDbl_add15L: @ @mcl_fpDbl_add15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #184
+ sub sp, sp, #184
+ ldm r1, {r7, r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r10}
+ adds r4, r4, r7
+ str r4, [sp, #100] @ 4-byte Spill
+ ldr r4, [r2, #96]
+ str r4, [sp, #148] @ 4-byte Spill
+ ldr r4, [r2, #100]
+ str r4, [sp, #164] @ 4-byte Spill
+ ldr r4, [r2, #104]
+ str r4, [sp, #168] @ 4-byte Spill
+ ldr r4, [r2, #108]
+ str r4, [sp, #172] @ 4-byte Spill
+ ldr r4, [r2, #112]
+ str r4, [sp, #176] @ 4-byte Spill
+ ldr r4, [r2, #116]
+ str r4, [sp, #180] @ 4-byte Spill
+ adcs r4, r5, r8
+ adcs r7, r6, lr
+ str r4, [sp, #68] @ 4-byte Spill
+ add lr, r1, #16
+ str r7, [sp, #64] @ 4-byte Spill
+ adcs r7, r10, r12
+ add r10, r1, #32
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #132] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #140] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #152] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #156] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #160] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #128] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r2, #16]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #96]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #104]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #108]
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [r1, #112]
+ str r2, [sp, #120] @ 4-byte Spill
+ ldr r2, [r1, #116]
+ str r2, [sp, #124] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r8, r9, r10}
+ ldr r2, [r1, #56]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #64] @ 4-byte Reload
+ add r11, r3, #32
+ str r7, [r0, #8]
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r1, r7, r1
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r2, r7, r2
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ adcs r1, r1, r12
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r2, r2, lr
+ str r2, [r0, #28]
+ adcs r1, r1, r4
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r2, r2, r5
+ str r2, [r0, #36]
+ adcs r1, r1, r6
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r1, [r0, #40]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r2, r2, r8
+ str r2, [r0, #44]
+ adcs r1, r1, r9
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str r1, [r0, #48]
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r2, r2, r10
+ adcs r1, r1, r7
+ str r2, [r0, #52]
+ ldr r2, [sp, #96] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #56]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ adcs r12, r2, r7
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r12, [sp, #84] @ 4-byte Spill
+ adcs r9, r1, r2
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r9, [sp, #88] @ 4-byte Spill
+ adcs r6, r1, r2
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r6, [sp, #96] @ 4-byte Spill
+ adcs r7, r1, r2
+ ldr r1, [sp, #144] @ 4-byte Reload
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r7, [sp, #132] @ 4-byte Spill
+ adcs r4, r1, r2
+ ldr r1, [sp, #152] @ 4-byte Reload
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r4, [sp, #92] @ 4-byte Spill
+ adcs r5, r1, r2
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r5, [sp, #100] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #152] @ 4-byte Spill
+ ldr r1, [sp, #156] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #156] @ 4-byte Spill
+ ldr r1, [sp, #160] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r1, [sp, #160] @ 4-byte Spill
+ ldr r1, [sp, #148] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #148] @ 4-byte Spill
+ ldr r1, [sp, #164] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ str r1, [sp, #164] @ 4-byte Spill
+ ldr r1, [sp, #168] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r1, [sp, #168] @ 4-byte Spill
+ ldr r1, [sp, #172] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str r1, [sp, #172] @ 4-byte Spill
+ ldr r1, [sp, #176] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ str r1, [sp, #176] @ 4-byte Spill
+ ldr r1, [sp, #180] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #180] @ 4-byte Spill
+ mov r1, #0
+ adc r1, r1, #0
+ str r1, [sp, #128] @ 4-byte Spill
+ ldmib r3, {r2, lr}
+ ldr r1, [r3, #16]
+ ldr r8, [r3, #12]
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ str r1, [sp, #136] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ str r1, [sp, #140] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ str r1, [sp, #144] @ 4-byte Spill
+ ldr r1, [r3]
+ subs r1, r12, r1
+ sbcs r12, r9, r2
+ ldm r11, {r9, r10, r11}
+ ldr r2, [r3, #44]
+ sbcs lr, r6, lr
+ sbcs r6, r7, r8
+ ldr r7, [sp, #144] @ 4-byte Reload
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r3, #48]
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r3, #52]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r3, #56]
+ str r2, [sp, #120] @ 4-byte Spill
+ ldr r2, [sp, #124] @ 4-byte Reload
+ sbcs r3, r4, r2
+ ldr r2, [sp, #136] @ 4-byte Reload
+ sbcs r4, r5, r2
+ ldr r2, [sp, #152] @ 4-byte Reload
+ ldr r5, [sp, #140] @ 4-byte Reload
+ sbcs r5, r2, r5
+ ldr r2, [sp, #156] @ 4-byte Reload
+ sbcs r8, r2, r7
+ ldr r2, [sp, #160] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ sbcs r9, r2, r9
+ ldr r2, [sp, #148] @ 4-byte Reload
+ sbcs r10, r2, r10
+ ldr r2, [sp, #164] @ 4-byte Reload
+ sbcs r2, r2, r11
+ ldr r11, [sp, #84] @ 4-byte Reload
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [sp, #168] @ 4-byte Reload
+ sbcs r2, r2, r7
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r2, [sp, #124] @ 4-byte Spill
+ ldr r2, [sp, #172] @ 4-byte Reload
+ sbcs r2, r2, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r2, [sp, #136] @ 4-byte Spill
+ ldr r2, [sp, #176] @ 4-byte Reload
+ sbcs r2, r2, r7
+ ldr r7, [sp, #120] @ 4-byte Reload
+ str r2, [sp, #140] @ 4-byte Spill
+ ldr r2, [sp, #180] @ 4-byte Reload
+ sbcs r2, r2, r7
+ str r2, [sp, #144] @ 4-byte Spill
+ ldr r2, [sp, #128] @ 4-byte Reload
+ sbc r2, r2, #0
+ ands r2, r2, #1
+ movne r1, r11
+ str r1, [r0, #60]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ movne r12, r1
+ ldr r1, [sp, #96] @ 4-byte Reload
+ str r12, [r0, #64]
+ movne lr, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ cmp r2, #0
+ str lr, [r0, #68]
+ movne r6, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r6, [r0, #72]
+ movne r3, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r3, [r0, #76]
+ ldr r3, [sp, #116] @ 4-byte Reload
+ movne r4, r1
+ ldr r1, [sp, #152] @ 4-byte Reload
+ cmp r2, #0
+ str r4, [r0, #80]
+ movne r5, r1
+ ldr r1, [sp, #156] @ 4-byte Reload
+ str r5, [r0, #84]
+ movne r8, r1
+ ldr r1, [sp, #160] @ 4-byte Reload
+ str r8, [r0, #88]
+ movne r9, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ cmp r2, #0
+ str r9, [r0, #92]
+ movne r10, r1
+ ldr r1, [sp, #164] @ 4-byte Reload
+ str r10, [r0, #96]
+ movne r3, r1
+ ldr r1, [sp, #168] @ 4-byte Reload
+ str r3, [r0, #100]
+ ldr r3, [sp, #124] @ 4-byte Reload
+ movne r3, r1
+ ldr r1, [sp, #172] @ 4-byte Reload
+ cmp r2, #0
+ ldr r2, [sp, #136] @ 4-byte Reload
+ str r3, [r0, #104]
+ movne r2, r1
+ ldr r1, [sp, #176] @ 4-byte Reload
+ str r2, [r0, #108]
+ ldr r2, [sp, #140] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #180] @ 4-byte Reload
+ str r2, [r0, #112]
+ ldr r2, [sp, #144] @ 4-byte Reload
+ movne r2, r1
+ str r2, [r0, #116]
+ add sp, sp, #184
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end236:
+ .size mcl_fpDbl_add15L, .Lfunc_end236-mcl_fpDbl_add15L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub15L
+ .align 2
+ .type mcl_fpDbl_sub15L,%function
+mcl_fpDbl_sub15L: @ @mcl_fpDbl_sub15L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #184
+ sub sp, sp, #184
+ ldr r7, [r2, #96]
+ ldr r9, [r2]
+ add r10, r1, #32
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [r2, #100]
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r2, #104]
+ str r7, [sp, #168] @ 4-byte Spill
+ ldr r7, [r2, #108]
+ str r7, [sp, #172] @ 4-byte Spill
+ ldr r7, [r2, #112]
+ str r7, [sp, #176] @ 4-byte Spill
+ ldr r7, [r2, #116]
+ str r7, [sp, #180] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #132] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #152] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #156] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #160] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #164] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #140] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #128] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #124] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #116] @ 4-byte Spill
+ ldmib r2, {r8, lr}
+ ldr r5, [r1]
+ ldr r12, [r2, #12]
+ ldmib r1, {r4, r6, r7}
+ subs r5, r5, r9
+ sbcs r4, r4, r8
+ str r5, [sp, #36] @ 4-byte Spill
+ ldr r5, [r2, #48]
+ sbcs r6, r6, lr
+ str r4, [sp, #28] @ 4-byte Spill
+ ldr r4, [r2, #44]
+ add lr, r1, #16
+ sbcs r7, r7, r12
+ str r6, [sp, #24] @ 4-byte Spill
+ ldr r6, [r2, #40]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r5, [sp, #88] @ 4-byte Spill
+ str r4, [sp, #84] @ 4-byte Spill
+ str r6, [sp, #80] @ 4-byte Spill
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r2, #16]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #96]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #92] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r2, [sp, #96] @ 4-byte Spill
+ ldr r2, [r1, #104]
+ str r2, [sp, #100] @ 4-byte Spill
+ ldr r2, [r1, #108]
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r1, #112]
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #116]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #76] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r8, r9, r10}
+ ldr r2, [r1, #56]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #24] @ 4-byte Reload
+ ldr r11, [r3, #32]
+ str r7, [r0, #8]
+ ldr r7, [sp, #8] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #20] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ sbcs r2, r2, r7
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ sbcs r1, r12, r1
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r2, lr, r2
+ str r2, [r0, #28]
+ sbcs r1, r4, r1
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbcs r2, r5, r2
+ str r2, [r0, #36]
+ sbcs r1, r6, r1
+ ldr r2, [sp, #84] @ 4-byte Reload
+ str r1, [r0, #40]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ sbcs r2, r8, r2
+ str r2, [r0, #44]
+ sbcs r1, r9, r1
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r1, [r0, #48]
+ ldr r1, [sp, #120] @ 4-byte Reload
+ sbcs r2, r10, r2
+ sbcs r1, r7, r1
+ str r2, [r0, #52]
+ ldr r2, [sp, #124] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #56]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ sbcs lr, r7, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ ldr r7, [sp, #68] @ 4-byte Reload
+ sbcs r9, r2, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r9, [sp, #88] @ 4-byte Spill
+ sbcs r1, r2, r1
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [sp, #152] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r1, [sp, #148] @ 4-byte Spill
+ ldr r1, [sp, #156] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #160] @ 4-byte Reload
+ str r1, [sp, #152] @ 4-byte Spill
+ mov r1, #0
+ sbcs r2, r7, r2
+ ldr r7, [sp, #48] @ 4-byte Reload
+ str r2, [sp, #156] @ 4-byte Spill
+ ldr r2, [sp, #128] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #72] @ 4-byte Reload
+ str r2, [sp, #160] @ 4-byte Spill
+ ldr r2, [sp, #164] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r2, [sp, #164] @ 4-byte Spill
+ ldr r2, [sp, #140] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #92] @ 4-byte Reload
+ str r2, [sp, #140] @ 4-byte Spill
+ ldr r2, [sp, #136] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #96] @ 4-byte Reload
+ str r2, [sp, #136] @ 4-byte Spill
+ ldr r2, [sp, #144] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #100] @ 4-byte Reload
+ str r2, [sp, #144] @ 4-byte Spill
+ ldr r2, [sp, #168] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #104] @ 4-byte Reload
+ str r2, [sp, #168] @ 4-byte Spill
+ ldr r2, [sp, #172] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r2, [sp, #172] @ 4-byte Spill
+ ldr r2, [sp, #176] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r2, [sp, #176] @ 4-byte Spill
+ ldr r2, [sp, #180] @ 4-byte Reload
+ sbcs r2, r7, r2
+ sbc r1, r1, #0
+ str r2, [sp, #180] @ 4-byte Spill
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [r3, #56]
+ str r1, [sp, #128] @ 4-byte Spill
+ ldm r3, {r2, r5, r6}
+ ldr r4, [r3, #12]
+ ldr r12, [r3, #16]
+ ldr r8, [r3, #20]
+ ldr r10, [r3, #28]
+ ldr r7, [r3, #24]
+ ldr r3, [sp, #152] @ 4-byte Reload
+ adds r1, lr, r2
+ ldr r2, [sp, #132] @ 4-byte Reload
+ adcs r5, r9, r5
+ adcs r6, r2, r6
+ ldr r2, [sp, #148] @ 4-byte Reload
+ adcs r2, r2, r4
+ ldr r4, [sp, #156] @ 4-byte Reload
+ adcs r3, r3, r12
+ adcs r12, r4, r8
+ ldr r4, [sp, #160] @ 4-byte Reload
+ adcs r8, r4, r7
+ ldr r4, [sp, #164] @ 4-byte Reload
+ ldr r7, [sp, #140] @ 4-byte Reload
+ adcs r9, r4, r10
+ ldr r4, [sp, #104] @ 4-byte Reload
+ ldr r10, [sp, #128] @ 4-byte Reload
+ adcs r11, r7, r11
+ ldr r7, [sp, #136] @ 4-byte Reload
+ adcs r7, r7, r4
+ ldr r4, [sp, #112] @ 4-byte Reload
+ str r7, [sp, #104] @ 4-byte Spill
+ ldr r7, [sp, #144] @ 4-byte Reload
+ adcs r7, r7, r4
+ ldr r4, [sp, #116] @ 4-byte Reload
+ str r7, [sp, #112] @ 4-byte Spill
+ ldr r7, [sp, #168] @ 4-byte Reload
+ adcs r7, r7, r4
+ ldr r4, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #116] @ 4-byte Spill
+ ldr r7, [sp, #172] @ 4-byte Reload
+ adcs r7, r7, r4
+ ldr r4, [sp, #124] @ 4-byte Reload
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [sp, #176] @ 4-byte Reload
+ adcs r7, r7, r4
+ str r7, [sp, #124] @ 4-byte Spill
+ ldr r7, [sp, #180] @ 4-byte Reload
+ adc r7, r7, r10
+ str r7, [sp, #128] @ 4-byte Spill
+ ldr r7, [sp, #108] @ 4-byte Reload
+ ands r7, r7, #1
+ moveq r1, lr
+ str r1, [r0, #60]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ moveq r5, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r5, [r0, #64]
+ moveq r6, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ cmp r7, #0
+ str r6, [r0, #68]
+ moveq r2, r1
+ ldr r1, [sp, #152] @ 4-byte Reload
+ str r2, [r0, #72]
+ ldr r2, [sp, #104] @ 4-byte Reload
+ moveq r3, r1
+ ldr r1, [sp, #156] @ 4-byte Reload
+ str r3, [r0, #76]
+ moveq r12, r1
+ ldr r1, [sp, #160] @ 4-byte Reload
+ cmp r7, #0
+ str r12, [r0, #80]
+ moveq r8, r1
+ ldr r1, [sp, #164] @ 4-byte Reload
+ str r8, [r0, #84]
+ moveq r9, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r9, [r0, #88]
+ moveq r11, r1
+ ldr r1, [sp, #136] @ 4-byte Reload
+ cmp r7, #0
+ str r11, [r0, #92]
+ moveq r2, r1
+ ldr r1, [sp, #144] @ 4-byte Reload
+ str r2, [r0, #96]
+ ldr r2, [sp, #112] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #168] @ 4-byte Reload
+ str r2, [r0, #100]
+ ldr r2, [sp, #116] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #172] @ 4-byte Reload
+ cmp r7, #0
+ str r2, [r0, #104]
+ ldr r2, [sp, #120] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #176] @ 4-byte Reload
+ str r2, [r0, #108]
+ ldr r2, [sp, #124] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #180] @ 4-byte Reload
+ str r2, [r0, #112]
+ ldr r2, [sp, #128] @ 4-byte Reload
+ moveq r2, r1
+ str r2, [r0, #116]
+ add sp, sp, #184
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end237:
+ .size mcl_fpDbl_sub15L, .Lfunc_end237-mcl_fpDbl_sub15L
+ .cantunwind
+ .fnend
+
+ .align 2
+ .type .LmulPv512x32,%function
+.LmulPv512x32: @ @mulPv512x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r3, [r1, #28]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #28]
+ ldr r3, [r1, #32]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #32]
+ ldr r3, [r1, #36]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #36]
+ ldr r3, [r1, #40]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #40]
+ ldr r3, [r1, #44]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #44]
+ ldr r3, [r1, #48]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #48]
+ ldr r3, [r1, #52]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #52]
+ ldr r3, [r1, #56]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #56]
+ ldr r1, [r1, #60]
+ umull r3, r7, r1, r2
+ adcs r1, r6, r3
+ str r1, [r0, #60]
+ adc r1, r7, #0
+ str r1, [r0, #64]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end238:
+ .size .LmulPv512x32, .Lfunc_end238-.LmulPv512x32
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mulUnitPre16L
+ .align 2
+ .type mcl_fp_mulUnitPre16L,%function
+mcl_fp_mulUnitPre16L: @ @mcl_fp_mulUnitPre16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #92
+ sub sp, sp, #92
+ mov r4, r0
+ add r0, sp, #16
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #76]
+ add r11, sp, #40
+ add lr, sp, #16
+ ldr r10, [sp, #80]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #72]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #68]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #64]
+ str r0, [sp] @ 4-byte Spill
+ ldm r11, {r5, r6, r7, r8, r9, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ stm r4, {r0, r1, r2, r3, r12, lr}
+ add r0, r4, #24
+ str r10, [r4, #64]
+ stm r0, {r5, r6, r7, r8, r9, r11}
+ ldr r0, [sp] @ 4-byte Reload
+ str r0, [r4, #48]
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r0, [r4, #52]
+ ldr r0, [sp, #8] @ 4-byte Reload
+ str r0, [r4, #56]
+ ldr r0, [sp, #12] @ 4-byte Reload
+ str r0, [r4, #60]
+ add sp, sp, #92
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end239:
+ .size mcl_fp_mulUnitPre16L, .Lfunc_end239-mcl_fp_mulUnitPre16L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_mulPre16L
+ .align 2
+ .type mcl_fpDbl_mulPre16L,%function
+mcl_fpDbl_mulPre16L: @ @mcl_fpDbl_mulPre16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #276
+ sub sp, sp, #276
+ mov r6, r2
+ mov r5, r1
+ mov r4, r0
+ bl mcl_fpDbl_mulPre8L(PLT)
+ add r0, r4, #64
+ add r1, r5, #32
+ add r2, r6, #32
+ bl mcl_fpDbl_mulPre8L(PLT)
+ add r11, r6, #32
+ ldm r11, {r9, r10, r11}
+ ldr r0, [r6, #44]
+ ldr r8, [r6, #60]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [r6, #48]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [r6, #52]
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [r6, #56]
+ str r0, [sp, #144] @ 4-byte Spill
+ ldm r6, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [r6, #24]
+ ldr r6, [r6, #28]
+ adds r0, r0, r9
+ str r0, [sp, #136] @ 4-byte Spill
+ adcs r0, r1, r10
+ str r0, [sp, #132] @ 4-byte Spill
+ adcs r0, r2, r11
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, lr, r0
+ add lr, r5, #44
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #144] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ adcs r0, r6, r8
+ str r0, [sp, #108] @ 4-byte Spill
+ mov r0, #0
+ ldm r5, {r8, r10, r11}
+ ldr r7, [r5, #32]
+ ldr r3, [r5, #36]
+ ldr r2, [r5, #40]
+ adc r6, r0, #0
+ ldr r0, [r5, #12]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r5, #16]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r5, #20]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r5, #24]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r5, #28]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldm lr, {r0, r1, r12, lr}
+ ldr r9, [r5, #60]
+ adds r5, r8, r7
+ adcs r3, r10, r3
+ str r5, [sp, #180]
+ str r5, [sp, #144] @ 4-byte Spill
+ adcs r8, r11, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str r3, [sp, #184]
+ str r3, [sp, #140] @ 4-byte Spill
+ str r8, [sp, #188]
+ adcs r11, r2, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ add r2, sp, #148
+ str r11, [sp, #192]
+ adcs r5, r0, r1
+ ldr r0, [sp, #96] @ 4-byte Reload
+ add r1, sp, #180
+ str r5, [sp, #196]
+ adcs r7, r0, r12
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r7, [sp, #200]
+ adcs r10, r0, lr
+ ldr r0, [sp, #104] @ 4-byte Reload
+ str r10, [sp, #204]
+ adcs r0, r0, r9
+ str r0, [sp, #208]
+ mov r9, r0
+ ldr r0, [sp, #136] @ 4-byte Reload
+ str r0, [sp, #148]
+ ldr r0, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #152]
+ ldr r0, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #156]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #160]
+ ldr r0, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #164]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #168]
+ ldr r0, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #172]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #176]
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ add r0, sp, #212
+ bl mcl_fpDbl_mulPre8L(PLT)
+ ldr r0, [sp, #136] @ 4-byte Reload
+ cmp r6, #0
+ ldr r1, [sp, #144] @ 4-byte Reload
+ ldr r2, [sp, #140] @ 4-byte Reload
+ ldr r3, [sp, #124] @ 4-byte Reload
+ moveq r9, r6
+ moveq r10, r6
+ moveq r7, r6
+ moveq r5, r6
+ moveq r11, r6
+ cmp r6, #0
+ moveq r1, r6
+ moveq r8, r6
+ moveq r2, r6
+ str r9, [sp, #104] @ 4-byte Spill
+ str r1, [sp, #144] @ 4-byte Spill
+ str r2, [sp, #140] @ 4-byte Spill
+ str r8, [sp, #96] @ 4-byte Spill
+ adds r12, r1, r0
+ ldr r1, [sp, #132] @ 4-byte Reload
+ adcs lr, r2, r1
+ ldr r2, [sp, #128] @ 4-byte Reload
+ adcs r2, r8, r2
+ ldr r8, [sp, #104] @ 4-byte Reload
+ adcs r9, r11, r3
+ ldr r3, [sp, #120] @ 4-byte Reload
+ adcs r1, r5, r3
+ ldr r3, [sp, #116] @ 4-byte Reload
+ adcs r0, r7, r3
+ ldr r3, [sp, #112] @ 4-byte Reload
+ adcs r3, r10, r3
+ str r3, [sp, #124] @ 4-byte Spill
+ ldr r3, [sp, #108] @ 4-byte Reload
+ adcs r3, r8, r3
+ ldr r8, [sp, #124] @ 4-byte Reload
+ str r3, [sp, #128] @ 4-byte Spill
+ mov r3, #0
+ adc r3, r3, #0
+ str r3, [sp, #136] @ 4-byte Spill
+ ldr r3, [sp, #100] @ 4-byte Reload
+ cmp r3, #0
+ moveq r0, r7
+ moveq r1, r5
+ moveq r9, r11
+ ldr r5, [sp, #136] @ 4-byte Reload
+ ldr r7, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ moveq r2, r0
+ ldr r0, [sp, #140] @ 4-byte Reload
+ moveq lr, r0
+ ldr r0, [sp, #144] @ 4-byte Reload
+ cmp r3, #0
+ moveq r5, r3
+ and r3, r6, r3
+ ldr r6, [sp, #244]
+ moveq r8, r10
+ moveq r12, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ moveq r7, r0
+ adds r0, r12, r6
+ add r6, sp, #216
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #248]
+ adcs r0, lr, r0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #252]
+ adcs r10, r2, r0
+ ldr r0, [sp, #256]
+ adcs r0, r9, r0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ adcs r0, r1, r0
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #264]
+ adcs r0, r1, r0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #268]
+ adcs r0, r8, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ adcs r0, r7, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ adc r0, r5, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldm r4, {r1, r12, lr}
+ ldr r5, [sp, #212]
+ ldr r8, [r4, #12]
+ ldm r6, {r2, r3, r6}
+ ldr r0, [sp, #236]
+ ldr r7, [sp, #240]
+ ldr r9, [r4, #72]
+ subs r1, r5, r1
+ ldr r5, [sp, #228]
+ sbcs r2, r2, r12
+ sbcs r12, r3, lr
+ ldr r3, [sp, #140] @ 4-byte Reload
+ sbcs r11, r6, r8
+ ldr r6, [r4, #16]
+ ldr r8, [r4, #68]
+ sbcs lr, r5, r6
+ ldr r5, [r4, #20]
+ ldr r6, [sp, #232]
+ sbcs r5, r6, r5
+ ldr r6, [r4, #24]
+ sbcs r6, r0, r6
+ ldr r0, [r4, #28]
+ sbcs r0, r7, r0
+ ldr r7, [r4, #32]
+ sbcs r3, r3, r7
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r4, #36]
+ str r3, [sp, #84] @ 4-byte Spill
+ ldr r3, [sp, #136] @ 4-byte Reload
+ str r7, [sp, #140] @ 4-byte Spill
+ sbcs r3, r3, r7
+ ldr r7, [r4, #40]
+ str r3, [sp, #76] @ 4-byte Spill
+ sbcs r3, r10, r7
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [r4, #44]
+ ldr r10, [r4, #76]
+ str r3, [sp, #72] @ 4-byte Spill
+ ldr r3, [sp, #128] @ 4-byte Reload
+ str r7, [sp, #132] @ 4-byte Spill
+ sbcs r3, r3, r7
+ ldr r7, [r4, #48]
+ str r3, [sp, #68] @ 4-byte Spill
+ ldr r3, [sp, #124] @ 4-byte Reload
+ str r7, [sp, #128] @ 4-byte Spill
+ sbcs r3, r3, r7
+ ldr r7, [r4, #52]
+ str r3, [sp, #64] @ 4-byte Spill
+ ldr r3, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #124] @ 4-byte Spill
+ sbcs r3, r3, r7
+ ldr r7, [r4, #56]
+ str r3, [sp, #60] @ 4-byte Spill
+ ldr r3, [sp, #116] @ 4-byte Reload
+ str r7, [sp, #120] @ 4-byte Spill
+ sbcs r3, r3, r7
+ ldr r7, [r4, #60]
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [sp, #112] @ 4-byte Reload
+ str r7, [sp, #116] @ 4-byte Spill
+ sbcs r3, r3, r7
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [sp, #108] @ 4-byte Reload
+ sbc r3, r3, #0
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [r4, #64]
+ subs r1, r1, r3
+ str r3, [sp, #80] @ 4-byte Spill
+ str r1, [sp, #44] @ 4-byte Spill
+ sbcs r1, r2, r8
+ str r1, [sp, #40] @ 4-byte Spill
+ sbcs r1, r12, r9
+ add r12, r4, #104
+ str r1, [sp, #36] @ 4-byte Spill
+ sbcs r1, r11, r10
+ ldr r11, [r4, #80]
+ str r1, [sp, #32] @ 4-byte Spill
+ sbcs r1, lr, r11
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r4, #84]
+ str r1, [sp, #112] @ 4-byte Spill
+ sbcs r1, r5, r1
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [r4, #88]
+ str r1, [sp, #108] @ 4-byte Spill
+ sbcs r1, r6, r1
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [r4, #92]
+ sbcs r0, r0, r1
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [r4, #100]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r4, #96]
+ str r1, [sp, #96] @ 4-byte Spill
+ str r0, [sp, #100] @ 4-byte Spill
+ ldm r12, {r2, r3, r12}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ ldr lr, [r4, #116]
+ ldr r5, [r4, #120]
+ ldr r6, [r4, #124]
+ sbcs r0, r7, r0
+ str r12, [sp, #92] @ 4-byte Spill
+ str r6, [sp, #88] @ 4-byte Spill
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ sbcs r0, r0, r2
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ sbcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r0, r12
+ mov r12, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ sbcs r0, r0, lr
+ mov lr, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ sbcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ sbcs r7, r0, r6
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r6, [sp, #40] @ 4-byte Reload
+ sbc r5, r0, #0
+ ldr r0, [sp, #144] @ 4-byte Reload
+ adds r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [r4, #32]
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #36] @ 4-byte Reload
+ str r1, [r4, #36]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #32] @ 4-byte Reload
+ str r0, [r4, #40]
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #28] @ 4-byte Reload
+ str r1, [r4, #44]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #24] @ 4-byte Reload
+ str r0, [r4, #48]
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #20] @ 4-byte Reload
+ str r1, [r4, #52]
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #16] @ 4-byte Reload
+ str r0, [r4, #56]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #4] @ 4-byte Reload
+ str r1, [r4, #60]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #76] @ 4-byte Reload
+ str r0, [r4, #64]
+ adcs r1, r8, r1
+ ldr r0, [sp, #12] @ 4-byte Reload
+ str r1, [r4, #68]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [r4, #72]
+ adcs r1, r10, r1
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r1, [r4, #76]
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r0, r11, r0
+ adcs r1, r1, r6
+ str r0, [r4, #80]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r6, [sp, #84] @ 4-byte Reload
+ str r1, [r4, #84]
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [r4, #88]
+ adcs r1, r1, r7
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r1, [r4, #92]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r8, r0, r5
+ ldr r5, [sp, #92] @ 4-byte Reload
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, #0
+ str r8, [r4, #96]
+ adcs r2, r2, #0
+ adcs r3, r3, #0
+ adcs r7, r5, #0
+ adcs r6, r12, #0
+ adcs r5, lr, #0
+ adc r12, r0, #0
+ add r0, r4, #100
+ stm r0, {r1, r2, r3, r7}
+ str r6, [r4, #116]
+ str r5, [r4, #120]
+ str r12, [r4, #124]
+ add sp, sp, #276
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end240:
+ .size mcl_fpDbl_mulPre16L, .Lfunc_end240-mcl_fpDbl_mulPre16L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre16L
+ .align 2
+ .type mcl_fpDbl_sqrPre16L,%function
+mcl_fpDbl_sqrPre16L: @ @mcl_fpDbl_sqrPre16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #276
+ sub sp, sp, #276
+ mov r5, r1
+ mov r4, r0
+ mov r2, r5
+ bl mcl_fpDbl_mulPre8L(PLT)
+ add r1, r5, #32
+ add r0, r4, #64
+ mov r2, r1
+ bl mcl_fpDbl_mulPre8L(PLT)
+ ldm r5, {r8, r9, r10}
+ ldr r0, [r5, #12]
+ ldr r6, [r5, #32]
+ ldr r7, [r5, #36]
+ ldr r3, [r5, #40]
+ add lr, r5, #44
+ ldr r11, [r5, #16]
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [r5, #20]
+ adds r6, r8, r6
+ adcs r7, r9, r7
+ adcs r3, r10, r3
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [r5, #24]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [r5, #28]
+ str r0, [sp, #144] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r12, lr}
+ ldr r5, [sp, #136] @ 4-byte Reload
+ str r6, [sp, #180]
+ str r7, [sp, #184]
+ str r6, [sp, #148]
+ str r3, [sp, #128] @ 4-byte Spill
+ str r3, [sp, #188]
+ str r7, [sp, #152]
+ adcs r10, r5, r0
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r11, r11, r1
+ str r10, [sp, #192]
+ add r1, sp, #180
+ str r11, [sp, #196]
+ adcs r8, r0, r2
+ ldr r0, [sp, #132] @ 4-byte Reload
+ add r2, sp, #148
+ str r8, [sp, #200]
+ adcs r9, r0, r12
+ ldr r0, [sp, #144] @ 4-byte Reload
+ str r9, [sp, #204]
+ adcs r5, r0, lr
+ add r0, sp, #156
+ str r5, [sp, #208]
+ stm r0, {r3, r10, r11}
+ mov r0, #0
+ str r8, [sp, #168]
+ str r9, [sp, #172]
+ str r5, [sp, #176]
+ adc r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ add r0, sp, #212
+ bl mcl_fpDbl_mulPre8L(PLT)
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adds r2, r6, r6
+ ldr r1, [sp, #244]
+ ldr r6, [sp, #248]
+ ldr lr, [sp, #264]
+ ldr r12, [sp, #268]
+ adcs r3, r7, r7
+ adcs r7, r0, r0
+ str r1, [sp, #128] @ 4-byte Spill
+ str r6, [sp, #116] @ 4-byte Spill
+ str r12, [sp, #108] @ 4-byte Spill
+ adcs r10, r10, r10
+ adcs r0, r11, r11
+ ldr r11, [sp, #252]
+ str r0, [sp, #144] @ 4-byte Spill
+ adcs r0, r8, r8
+ ldr r8, [sp, #260]
+ str r0, [sp, #140] @ 4-byte Spill
+ adcs r0, r9, r9
+ ldr r9, [sp, #256]
+ str r0, [sp, #120] @ 4-byte Spill
+ adc r0, r5, r5
+ adds r2, r1, r2
+ adcs r1, r6, r3
+ str r2, [sp, #132] @ 4-byte Spill
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [sp, #144] @ 4-byte Reload
+ adcs r7, r11, r7
+ adcs r3, r9, r10
+ adcs r2, r8, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ adcs r1, lr, r1
+ adcs r10, r12, r6
+ ldr r6, [sp, #112] @ 4-byte Reload
+ adcs r12, r0, r6
+ mov r6, r0
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adc r5, r0, r5, lsr #31
+ cmp r0, #0
+ moveq r1, lr
+ moveq r2, r8
+ moveq r3, r9
+ moveq r7, r11
+ str r1, [sp, #144] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r2, [sp, #140] @ 4-byte Spill
+ ldr r2, [sp, #128] @ 4-byte Reload
+ str r3, [sp, #120] @ 4-byte Spill
+ add r3, sp, #216
+ moveq r10, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ cmp r0, #0
+ moveq r12, r6
+ ldr r6, [sp, #124] @ 4-byte Reload
+ moveq r5, r0
+ str r12, [sp, #112] @ 4-byte Spill
+ moveq r6, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldm r4, {r12, lr}
+ ldr r9, [sp, #212]
+ ldr r11, [r4, #8]
+ ldr r8, [r4, #12]
+ moveq r1, r2
+ ldm r3, {r0, r2, r3}
+ subs r12, r9, r12
+ sbcs r9, r0, lr
+ ldr r0, [r4, #16]
+ sbcs r11, r2, r11
+ ldr r2, [sp, #228]
+ sbcs lr, r3, r8
+ ldr r8, [r4, #68]
+ sbcs r0, r2, r0
+ ldr r2, [sp, #232]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [r4, #20]
+ sbcs r0, r2, r0
+ ldr r2, [sp, #236]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [r4, #24]
+ sbcs r0, r2, r0
+ ldr r2, [sp, #240]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r4, #28]
+ sbcs r3, r2, r0
+ ldr r0, [r4, #32]
+ str r0, [sp, #136] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r4, #36]
+ str r0, [sp, #132] @ 4-byte Spill
+ sbcs r0, r6, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r4, #40]
+ str r0, [sp, #128] @ 4-byte Spill
+ sbcs r0, r7, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r4, #44]
+ str r0, [sp, #124] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r4, #48]
+ str r0, [sp, #120] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [r4, #52]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #144] @ 4-byte Reload
+ str r1, [sp, #140] @ 4-byte Spill
+ sbcs r0, r0, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r4, #56]
+ str r0, [sp, #144] @ 4-byte Spill
+ sbcs r0, r10, r0
+ ldr r10, [r4, #76]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r4, #60]
+ str r0, [sp, #116] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ sbc r0, r5, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r4, #64]
+ str r0, [sp, #80] @ 4-byte Spill
+ subs r0, r12, r0
+ add r12, r4, #104
+ str r0, [sp, #44] @ 4-byte Spill
+ sbcs r0, r9, r8
+ ldr r9, [r4, #72]
+ str r0, [sp, #40] @ 4-byte Spill
+ sbcs r0, r11, r9
+ ldr r11, [r4, #80]
+ str r0, [sp, #36] @ 4-byte Spill
+ sbcs r0, lr, r10
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs r0, r0, r11
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r4, #84]
+ str r0, [sp, #112] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r4, #88]
+ str r0, [sp, #108] @ 4-byte Spill
+ sbcs r0, r1, r0
+ ldr r1, [r4, #100]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r4, #92]
+ str r1, [sp, #96] @ 4-byte Spill
+ str r0, [sp, #104] @ 4-byte Spill
+ sbcs r0, r3, r0
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r4, #96]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldm r12, {r2, r3, r12}
+ ldr r7, [sp, #84] @ 4-byte Reload
+ ldr lr, [r4, #116]
+ ldr r5, [r4, #120]
+ ldr r6, [r4, #124]
+ sbcs r0, r7, r0
+ str r12, [sp, #92] @ 4-byte Spill
+ str r6, [sp, #88] @ 4-byte Spill
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ sbcs r0, r0, r2
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ sbcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r0, r12
+ mov r12, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ sbcs r0, r0, lr
+ mov lr, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ sbcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ sbcs r7, r0, r6
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r6, [sp, #40] @ 4-byte Reload
+ sbc r5, r0, #0
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adds r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [r4, #32]
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #36] @ 4-byte Reload
+ str r1, [r4, #36]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #32] @ 4-byte Reload
+ str r0, [r4, #40]
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #28] @ 4-byte Reload
+ str r1, [r4, #44]
+ ldr r1, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #24] @ 4-byte Reload
+ str r0, [r4, #48]
+ ldr r0, [sp, #144] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #20] @ 4-byte Reload
+ str r1, [r4, #52]
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #16] @ 4-byte Reload
+ str r0, [r4, #56]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r6
+ ldr r6, [sp, #4] @ 4-byte Reload
+ str r1, [r4, #60]
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #76] @ 4-byte Reload
+ str r0, [r4, #64]
+ adcs r1, r8, r1
+ ldr r0, [sp, #12] @ 4-byte Reload
+ str r1, [r4, #68]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [r4, #72]
+ adcs r1, r10, r1
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r1, [r4, #76]
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r0, r11, r0
+ adcs r1, r1, r6
+ str r0, [r4, #80]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r6, [sp, #84] @ 4-byte Reload
+ str r1, [r4, #84]
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [r4, #88]
+ adcs r1, r1, r7
+ ldr r0, [sp, #100] @ 4-byte Reload
+ str r1, [r4, #92]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r8, r0, r5
+ ldr r5, [sp, #92] @ 4-byte Reload
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, #0
+ str r8, [r4, #96]
+ adcs r2, r2, #0
+ adcs r3, r3, #0
+ adcs r7, r5, #0
+ adcs r6, r12, #0
+ adcs r5, lr, #0
+ adc r12, r0, #0
+ add r0, r4, #100
+ stm r0, {r1, r2, r3, r7}
+ str r6, [r4, #116]
+ str r5, [r4, #120]
+ str r12, [r4, #124]
+ add sp, sp, #276
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end241:
+ .size mcl_fpDbl_sqrPre16L, .Lfunc_end241-mcl_fpDbl_sqrPre16L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont16L
+ .align 2
+ .type mcl_fp_mont16L,%function
+mcl_fp_mont16L: @ @mcl_fp_mont16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #404
+ sub sp, sp, #404
+ .pad #2048
+ sub sp, sp, #2048
+ add r12, sp, #132
+ add r6, sp, #2048
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #92] @ 4-byte Spill
+ add r0, r6, #328
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #128] @ 4-byte Spill
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #2376]
+ ldr r1, [sp, #2380]
+ str r0, [sp, #72] @ 4-byte Spill
+ mul r2, r0, r5
+ ldr r0, [sp, #2440]
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #2384]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #2436]
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #2388]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #2432]
+ str r1, [sp, #88] @ 4-byte Spill
+ mov r1, r4
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #2428]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #2424]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #2420]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #2416]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #2412]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #2408]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #2404]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #2400]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #2396]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2392]
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #2304
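+@ Presumably computes p * m into the 17-word buffer at sp+2304 (r1 was set to
+@ the modulus pointer via r4 above); its low limb should cancel t[0] so the
+@ running sum can be shifted down by one 32-bit word.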
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #2368]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r7, [sp, #2332]
+ ldr r4, [sp, #2328]
+ ldr r8, [sp, #2324]
+ ldr r11, [sp, #2320]
+ ldr r9, [sp, #2304]
+ ldr r10, [sp, #2308]
+ ldr r6, [sp, #2312]
+ ldr r5, [sp, #2316]
+ add lr, sp, #2048
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #2364]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2360]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2356]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2352]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2348]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2344]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2340]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2336]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, lr, #184
+ bl .LmulPv512x32(PLT)
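+@ Each round below folds the previous partial sum, the p * m term from the
+@ reduction, and the freshly computed x * y[i] into one accumulator; the long
+@ adds/adcs chains that follow are the limb-by-limb carry propagation of that sum.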
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ ldr r3, [sp, #2248]
+ ldr r12, [sp, #2252]
+ ldr lr, [sp, #2256]
+ adds r0, r9, r0
+ ldr r9, [sp, #2272]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #2276]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r6, [sp, #96] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #2264]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r11, r0
+ ldr r11, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #2268]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #2260]
+ adcs r1, r7, r1
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ ldr r7, [sp, #2232]
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #120] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #124] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #2244]
+ adc r0, r0, #0
+ adds r7, r11, r7
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #2240]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2296]
+ str r7, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2292]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2288]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2284]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #2280]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #2236]
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
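+@ Same pattern as the first round: r7 is the new low limb of the accumulator and
+@ [sp, #128] holds the saved n' constant, so r2 is presumably the next Montgomery
+@ quotient m; this per-word structure repeats for the remaining limbs of y.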
+ add r0, sp, #2160
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #2224]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r5, [sp, #2188]
+ ldr r6, [sp, #2184]
+ ldr r8, [sp, #2180]
+ ldr r9, [sp, #2176]
+ ldr r10, [sp, #2160]
+ ldr r11, [sp, #2164]
+ ldr r4, [sp, #2168]
+ ldr r7, [sp, #2172]
+ add lr, sp, #2048
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2220]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2216]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2212]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2208]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2204]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2200]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2196]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2192]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, lr, #40
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #2100]
+ ldr r3, [sp, #2104]
+ ldr r12, [sp, #2108]
+ ldr lr, [sp, #2112]
+ adds r0, r0, r10
+ ldr r10, [sp, #2132]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r11, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #2116]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #2088]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #2128]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #2124]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #2120]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #2096]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r11, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2152]
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2148]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2144]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2140]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2136]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2092]
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #2016
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #2080]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r5, [sp, #2044]
+ ldr r6, [sp, #2040]
+ ldr r8, [sp, #2036]
+ ldr r9, [sp, #2032]
+ ldr r10, [sp, #2016]
+ ldr r11, [sp, #2020]
+ ldr r4, [sp, #2024]
+ ldr r7, [sp, #2028]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2076]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2072]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2068]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2064]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2060]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2056]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2052]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2048]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, lr, #920
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1956]
+ ldr r3, [sp, #1960]
+ ldr r12, [sp, #1964]
+ ldr lr, [sp, #1968]
+ adds r0, r0, r10
+ ldr r10, [sp, #1988]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r11, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1972]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1944]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1984]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1980]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1976]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1952]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r11, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2008]
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2004]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2000]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1996]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1992]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1948]
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1872
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1936]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r5, [sp, #1900]
+ ldr r6, [sp, #1896]
+ ldr r8, [sp, #1892]
+ ldr r9, [sp, #1888]
+ ldr r10, [sp, #1872]
+ ldr r11, [sp, #1876]
+ ldr r4, [sp, #1880]
+ ldr r7, [sp, #1884]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1932]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1928]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1924]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1920]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1916]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1912]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1908]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1904]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, lr, #776
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1812]
+ ldr r3, [sp, #1816]
+ ldr r12, [sp, #1820]
+ ldr lr, [sp, #1824]
+ adds r0, r0, r10
+ ldr r10, [sp, #1844]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r11, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1828]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1800]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1840]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1836]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1832]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1808]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r11, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1864]
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1860]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1856]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1852]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1848]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1804]
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1728
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1792]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r5, [sp, #1756]
+ ldr r6, [sp, #1752]
+ ldr r8, [sp, #1748]
+ ldr r9, [sp, #1744]
+ ldr r10, [sp, #1728]
+ ldr r11, [sp, #1732]
+ ldr r4, [sp, #1736]
+ ldr r7, [sp, #1740]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1788]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1784]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1780]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1776]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1772]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1768]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1764]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1760]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, lr, #632
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1668]
+ ldr r3, [sp, #1672]
+ ldr r12, [sp, #1676]
+ ldr lr, [sp, #1680]
+ adds r0, r0, r10
+ ldr r10, [sp, #1700]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r11, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1684]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1656]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1696]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1692]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1688]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1664]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r11, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1720]
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1716]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1712]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1708]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1704]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1660]
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1584
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1648]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r5, [sp, #1612]
+ ldr r6, [sp, #1608]
+ ldr r8, [sp, #1604]
+ ldr r9, [sp, #1600]
+ ldr r10, [sp, #1584]
+ ldr r11, [sp, #1588]
+ ldr r4, [sp, #1592]
+ ldr r7, [sp, #1596]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1644]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1640]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1636]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1632]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1628]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1624]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1620]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1616]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, lr, #488
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1524]
+ ldr r3, [sp, #1528]
+ ldr r12, [sp, #1532]
+ ldr lr, [sp, #1536]
+ adds r0, r0, r10
+ ldr r10, [sp, #1556]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r11, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1540]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1512]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1552]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1548]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1544]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1520]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r11, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1576]
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1572]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1568]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1564]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1560]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1516]
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1440
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1504]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r5, [sp, #1468]
+ ldr r6, [sp, #1464]
+ ldr r8, [sp, #1460]
+ ldr r9, [sp, #1456]
+ ldr r10, [sp, #1440]
+ ldr r11, [sp, #1444]
+ ldr r4, [sp, #1448]
+ ldr r7, [sp, #1452]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1500]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1496]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1492]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1488]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1484]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1480]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1476]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1472]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, lr, #344
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1380]
+ ldr r3, [sp, #1384]
+ ldr r12, [sp, #1388]
+ ldr lr, [sp, #1392]
+ adds r0, r0, r10
+ ldr r10, [sp, #1412]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r11, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1396]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1368]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1408]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1404]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1400]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1376]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r11, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1432]
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1428]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1424]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1420]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1416]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1372]
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1296
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1360]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r5, [sp, #1324]
+ ldr r6, [sp, #1320]
+ ldr r8, [sp, #1316]
+ ldr r9, [sp, #1312]
+ ldr r10, [sp, #1296]
+ ldr r11, [sp, #1300]
+ ldr r4, [sp, #1304]
+ ldr r7, [sp, #1308]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1356]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1352]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1348]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1344]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1336]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1332]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1328]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, lr, #200
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1236]
+ ldr r3, [sp, #1240]
+ ldr r12, [sp, #1244]
+ ldr lr, [sp, #1248]
+ adds r0, r0, r10
+ ldr r10, [sp, #1268]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r11, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1252]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1224]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1264]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1260]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1256]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1232]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r11, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1288]
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1284]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1276]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1272]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1228]
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1152
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1216]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r5, [sp, #1180]
+ ldr r6, [sp, #1176]
+ ldr r8, [sp, #1172]
+ ldr r9, [sp, #1168]
+ ldr r10, [sp, #1152]
+ ldr r11, [sp, #1156]
+ ldr r4, [sp, #1160]
+ ldr r7, [sp, #1164]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1212]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1208]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1204]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1200]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1188]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1184]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, lr, #56
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1092]
+ ldr r3, [sp, #1096]
+ ldr r12, [sp, #1100]
+ ldr lr, [sp, #1104]
+ adds r0, r0, r10
+ ldr r10, [sp, #1124]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ ldr r11, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1108]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1080]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1120]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1116]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1112]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1088]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r7, r11, r7
+ ldr r11, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1140]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1132]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1128]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1084]
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r7, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #1008
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1072]
+ add r10, sp, #1008
+ ldr r4, [sp, #1032]
+ ldr r5, [sp, #1028]
+ ldr r6, [sp, #1024]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1068]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1064]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1060]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #936
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #952
+ adds r0, r0, r7
+ ldr r7, [sp, #948]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r2, r0, r8
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #976
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #940]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #944]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #936]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ adds r0, r2, r4
+ mul r1, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #1000]
+ str r1, [sp, #48] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #996]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #992]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #120] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #120] @ 4-byte Spill
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r5, r6, r5
+ str r5, [sp, #116] @ 4-byte Spill
+ ldr r5, [sp, #112] @ 4-byte Reload
+ adcs r5, r5, r7
+ str r5, [sp, #112] @ 4-byte Spill
+ ldr r5, [sp, #108] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #864
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #928]
+ add r10, sp, #864
+ ldr r11, [sp, #892]
+ ldr r4, [sp, #888]
+ ldr r5, [sp, #884]
+ ldr r6, [sp, #880]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #920]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #916]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #912]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #908]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #904]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #900]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #896]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #792
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #124] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #808
+ adds r0, r0, r7
+ ldr r7, [sp, #804]
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r2, r0, r8
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #832
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #796]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #800]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #792]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r1, r2, r4
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ str r1, [sp, #124] @ 4-byte Spill
+ mul r2, r1, r0
+ ldr r0, [sp, #856]
+ str r2, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #852]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #848]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #120] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #72] @ 4-byte Spill
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r5, r6, r5
+ str r5, [sp, #68] @ 4-byte Spill
+ ldr r5, [sp, #112] @ 4-byte Reload
+ adcs r5, r5, r7
+ str r5, [sp, #64] @ 4-byte Spill
+ ldr r5, [sp, #108] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ add r0, sp, #720
+ bl .LmulPv512x32(PLT)
+ ldr r1, [sp, #784]
+ add r10, sp, #720
+ ldr r5, [sp, #748]
+ ldr r6, [sp, #744]
+ ldr r7, [sp, #740]
+ ldr r11, [sp, #736]
+ add r0, sp, #648
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #780]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #776]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #772]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #768]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #764]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #760]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #756]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #752]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r4, [sp, #732]
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #124] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #652
+ adds r0, r0, r8
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #676
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #712]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #648]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #124] @ 4-byte Reload
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #576
+ bl .LmulPv512x32(PLT)
+ ldr r1, [sp, #640]
+ add r11, sp, #584
+ ldr r6, [sp, #604]
+ ldr r5, [sp, #600]
+ ldr r8, [sp, #596]
+ ldr r9, [sp, #576]
+ ldr r10, [sp, #580]
+ add r0, sp, #504
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #636]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #632]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #628]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #624]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #620]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #616]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #612]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #608]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r11, {r4, r7, r11}
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [r1, #52]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #508
+ adds r0, r0, r9
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #532
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #568]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #564]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #560]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #552]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #504]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #76] @ 4-byte Reload
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #432
+ bl .LmulPv512x32(PLT)
+ ldr r1, [sp, #496]
+ add r11, sp, #440
+ ldr r6, [sp, #460]
+ ldr r5, [sp, #456]
+ ldr r8, [sp, #452]
+ ldr r9, [sp, #432]
+ ldr r10, [sp, #436]
+ add r0, sp, #360
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #492]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #488]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #484]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #480]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #476]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #472]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #468]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #464]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r11, {r4, r7, r11}
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [r1, #56]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #364
+ adds r0, r0, r9
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #388
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #424]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #420]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #360]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #76] @ 4-byte Reload
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #288
+ bl .LmulPv512x32(PLT)
+ ldr r1, [sp, #352]
+ add r11, sp, #296
+ ldr r7, [sp, #316]
+ ldr r9, [sp, #288]
+ ldr r5, [sp, #292]
+ add r0, sp, #216
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #348]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #344]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #340]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #336]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #332]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #328]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #324]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #320]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r11, {r4, r6, r8, r10, r11}
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [r1, #60]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #72] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ add lr, sp, #232
+ adds r0, r0, r9
+ add r9, sp, #216
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ adcs r1, r1, r4
+ str r1, [sp, #136] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r8
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #256
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #124] @ 4-byte Reload
+ adcs r1, r1, r7
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [sp, #120] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #80] @ 4-byte Spill
+ ldm r9, {r4, r7, r9}
+ ldr r5, [sp, #228]
+ adds r8, r0, r4
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r1, r8, r0
+ ldr r0, [sp, #280]
+ str r1, [sp, #64] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #276]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #268]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r6, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #136] @ 4-byte Reload
+ adcs r11, r11, r7
+ ldr r7, [sp, #132] @ 4-byte Reload
+ adcs r9, r7, r9
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adcs r5, r7, r5
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r10, r0, r1
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ add r0, sp, #144
+ bl .LmulPv512x32(PLT)
+ add r3, sp, #144
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r8, r0
+ adcs r7, r11, r1
+ ldr r0, [sp, #160]
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r8, r9, r2
+ str r7, [sp, #56] @ 4-byte Spill
+ adcs r5, r5, r3
+ mov r3, r6
+ str r8, [sp, #64] @ 4-byte Spill
+ str r5, [sp, #72] @ 4-byte Spill
+ adcs r4, r1, r0
+ ldr r0, [sp, #164]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r4, [sp, #76] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #168]
+ adcs lr, r1, r0
+ ldr r0, [sp, #172]
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str lr, [sp, #52] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #176]
+ adcs r0, r1, r0
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #180]
+ adcs r0, r1, r0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #184]
+ adcs r0, r1, r0
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ adcs r0, r1, r0
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ adcs r0, r1, r0
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #196]
+ adcs r0, r1, r0
+ ldr r1, [sp, #136] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #200]
+ adcs r0, r10, r0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #204]
+ adcs r0, r1, r0
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #208]
+ adcs r0, r1, r0
+ ldr r1, [r3]
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adc r0, r0, #0
+ subs r12, r7, r1
+ str r0, [sp, #68] @ 4-byte Spill
+ ldmib r3, {r0, r2, r6}
+ ldr r1, [r3, #32]
+ ldr r11, [r3, #40]
+ ldr r9, [r3, #28]
+ sbcs r7, r8, r0
+ ldr r0, [r3, #36]
+ sbcs r5, r5, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ sbcs r10, r4, r6
+ ldr r6, [r3, #20]
+ ldr r4, [r3, #24]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r3, #60]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ sbcs r2, r2, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ sbcs r3, lr, r6
+ ldr r6, [sp, #64] @ 4-byte Reload
+ sbcs lr, r0, r4
+ ldr r0, [sp, #104] @ 4-byte Reload
+ sbcs r4, r0, r9
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs r8, r0, r1
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #44] @ 4-byte Reload
+ sbcs r9, r0, r1
+ ldr r0, [sp, #116] @ 4-byte Reload
+ ldr r1, [sp, #60] @ 4-byte Reload
+ sbcs r11, r0, r11
+ ldr r0, [sp, #120] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ sbcs r0, r0, r1
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ sbc r0, r0, #0
+ ands r1, r0, #1
+ ldr r0, [sp, #56] @ 4-byte Reload
+ movne r7, r6
+ movne r12, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r12, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #72] @ 4-byte Reload
+ movne r5, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ cmp r1, #0
+ str r5, [r0, #8]
+ movne r10, r7
+ ldr r7, [sp, #96] @ 4-byte Reload
+ str r10, [r0, #12]
+ movne r2, r7
+ str r2, [r0, #16]
+ ldr r2, [sp, #52] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #20]
+ ldr r3, [sp, #60] @ 4-byte Reload
+ movne lr, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str lr, [r0, #24]
+ movne r4, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ str r4, [r0, #28]
+ movne r8, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ cmp r1, #0
+ str r8, [r0, #32]
+ movne r9, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r9, [r0, #36]
+ movne r11, r2
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str r11, [r0, #40]
+ movne r3, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #44]
+ ldr r3, [sp, #80] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #128] @ 4-byte Reload
+ str r3, [r0, #48]
+ ldr r3, [sp, #84] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #132] @ 4-byte Reload
+ str r3, [r0, #52]
+ ldr r3, [sp, #88] @ 4-byte Reload
+ movne r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [sp, #140] @ 4-byte Reload
+ str r3, [r0, #56]
+ movne r2, r1
+ str r2, [r0, #60]
+ add sp, sp, #404
+ add sp, sp, #2048
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end242:
+ .size mcl_fp_mont16L, .Lfunc_end242-mcl_fp_mont16L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montNF16L
+ .align 2
+ .type mcl_fp_montNF16L,%function
+mcl_fp_montNF16L: @ @mcl_fp_montNF16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #396
+ sub sp, sp, #396
+ .pad #2048
+ sub sp, sp, #2048
+ add r12, sp, #124
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #92] @ 4-byte Spill
+ add r0, sp, #2368
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #120] @ 4-byte Spill
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #2368]
+ ldr r1, [sp, #2372]
+ add r9, sp, #2048
+ str r0, [sp, #68] @ 4-byte Spill
+ mul r2, r0, r5
+ ldr r0, [sp, #2432]
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #2376]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #2428]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #2380]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #2424]
+ str r1, [sp, #80] @ 4-byte Spill
+ mov r1, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #2420]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #2416]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #2412]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #2408]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #2404]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #2400]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #2396]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #2392]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2388]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2384]
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, r9, #248
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #2360]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r5, [sp, #2324]
+ ldr r6, [sp, #2320]
+ ldr r7, [sp, #2316]
+ ldr r8, [sp, #2312]
+ ldr r10, [sp, #2296]
+ ldr r11, [sp, #2300]
+ ldr r4, [sp, #2304]
+ ldr r9, [sp, #2308]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2356]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2352]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2348]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2344]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2340]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2336]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2332]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2328]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, sp, #2224
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #68] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #2236]
+ ldr r3, [sp, #2240]
+ ldr r12, [sp, #2244]
+ ldr lr, [sp, #2248]
+ adds r0, r10, r0
+ ldr r10, [sp, #2268]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r11, [sp, #88] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #2252]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #2264]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #2260]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #2224]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #2256]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adc r0, r1, r0
+ adds r6, r11, r6
+ ldr r1, [sp, #2232]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2288]
+ str r6, [sp, #20] @ 4-byte Spill
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2284]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2280]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #2276]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #2272]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #2228]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #2048
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r4, #104
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #2216]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #2180]
+ ldr r7, [sp, #2176]
+ ldr r5, [sp, #2172]
+ ldr r8, [sp, #2168]
+ ldr r9, [sp, #2152]
+ ldr r10, [sp, #2156]
+ ldr r11, [sp, #2160]
+ ldr r4, [sp, #2164]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2212]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2208]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2204]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2200]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2196]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2192]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2188]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2184]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, sp, #2080
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #20] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #2092]
+ ldr r3, [sp, #2096]
+ ldr r12, [sp, #2100]
+ ldr lr, [sp, #2104]
+ adds r0, r0, r9
+ ldr r9, [sp, #2120]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #2124]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #2108]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #2116]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #2112]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #2080]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #2088]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2144]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2140]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2136]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2132]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2128]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2084]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r4, #984
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #2072]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #2036]
+ ldr r7, [sp, #2032]
+ ldr r5, [sp, #2028]
+ ldr r8, [sp, #2024]
+ ldr r9, [sp, #2008]
+ ldr r10, [sp, #2012]
+ ldr r11, [sp, #2016]
+ ldr r4, [sp, #2020]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2068]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2064]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2060]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2056]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2052]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2048]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2044]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2040]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #1936
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1948]
+ ldr r3, [sp, #1952]
+ ldr r12, [sp, #1956]
+ ldr lr, [sp, #1960]
+ adds r0, r0, r9
+ ldr r9, [sp, #1976]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1980]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1964]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1972]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1968]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1936]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1944]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2000]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1996]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1992]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1988]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1984]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1940]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r4, #840
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1928]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1892]
+ ldr r7, [sp, #1888]
+ ldr r5, [sp, #1884]
+ ldr r8, [sp, #1880]
+ ldr r9, [sp, #1864]
+ ldr r10, [sp, #1868]
+ ldr r11, [sp, #1872]
+ ldr r4, [sp, #1876]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1924]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1920]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1916]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1912]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1908]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1904]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1900]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1896]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, sp, #1792
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1804]
+ ldr r3, [sp, #1808]
+ ldr r12, [sp, #1812]
+ ldr lr, [sp, #1816]
+ adds r0, r0, r9
+ ldr r9, [sp, #1832]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1836]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1820]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1828]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1824]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1792]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1800]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1856]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1852]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1848]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1844]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1840]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1796]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r4, #696
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1784]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1748]
+ ldr r7, [sp, #1744]
+ ldr r5, [sp, #1740]
+ ldr r8, [sp, #1736]
+ ldr r9, [sp, #1720]
+ ldr r10, [sp, #1724]
+ ldr r11, [sp, #1728]
+ ldr r4, [sp, #1732]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1780]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1776]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1772]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1768]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1764]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1760]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1756]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1752]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #1648
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1660]
+ ldr r3, [sp, #1664]
+ ldr r12, [sp, #1668]
+ ldr lr, [sp, #1672]
+ adds r0, r0, r9
+ ldr r9, [sp, #1688]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1692]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1676]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1684]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1680]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1648]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1656]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1712]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1708]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1704]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1700]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1696]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1652]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r4, #552
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1640]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1604]
+ ldr r7, [sp, #1600]
+ ldr r5, [sp, #1596]
+ ldr r8, [sp, #1592]
+ ldr r9, [sp, #1576]
+ ldr r10, [sp, #1580]
+ ldr r11, [sp, #1584]
+ ldr r4, [sp, #1588]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1636]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1632]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1628]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1624]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1620]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1616]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1612]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1608]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #1504
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1516]
+ ldr r3, [sp, #1520]
+ ldr r12, [sp, #1524]
+ ldr lr, [sp, #1528]
+ adds r0, r0, r9
+ ldr r9, [sp, #1544]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1548]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1532]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1540]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1536]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1504]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1512]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1568]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1564]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1560]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1556]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1552]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1508]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r4, #408
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1496]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1460]
+ ldr r7, [sp, #1456]
+ ldr r5, [sp, #1452]
+ ldr r8, [sp, #1448]
+ ldr r9, [sp, #1432]
+ ldr r10, [sp, #1436]
+ ldr r11, [sp, #1440]
+ ldr r4, [sp, #1444]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1492]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1488]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1484]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1480]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1476]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1472]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1468]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1464]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #1360
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1372]
+ ldr r3, [sp, #1376]
+ ldr r12, [sp, #1380]
+ ldr lr, [sp, #1384]
+ adds r0, r0, r9
+ ldr r9, [sp, #1400]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1404]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1388]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1396]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1392]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1360]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1368]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1424]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1420]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1416]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1412]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1408]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1364]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r4, #264
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1352]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1316]
+ ldr r7, [sp, #1312]
+ ldr r5, [sp, #1308]
+ ldr r8, [sp, #1304]
+ ldr r9, [sp, #1288]
+ ldr r10, [sp, #1292]
+ ldr r11, [sp, #1296]
+ ldr r4, [sp, #1300]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1348]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1344]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1336]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1332]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1328]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1324]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1320]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #1216
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1228]
+ ldr r3, [sp, #1232]
+ ldr r12, [sp, #1236]
+ ldr lr, [sp, #1240]
+ adds r0, r0, r9
+ ldr r9, [sp, #1256]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1260]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1244]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1252]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1248]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1216]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1224]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1276]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1272]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1268]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1220]
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, r4, #120
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1208]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #1172]
+ ldr r7, [sp, #1168]
+ ldr r5, [sp, #1164]
+ ldr r8, [sp, #1160]
+ ldr r9, [sp, #1144]
+ ldr r10, [sp, #1148]
+ ldr r11, [sp, #1152]
+ ldr r4, [sp, #1156]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1204]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1200]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1188]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1184]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1180]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1176]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #1072
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ ldr r2, [sp, #1084]
+ ldr r3, [sp, #1088]
+ ldr r12, [sp, #1092]
+ ldr lr, [sp, #1096]
+ adds r0, r0, r9
+ ldr r9, [sp, #1112]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1116]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ ldr r11, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1100]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1108]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1104]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1072]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r6, r11, r6
+ ldr r1, [sp, #1080]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1132]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1128]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1124]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1120]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1076]
+ adcs r0, r7, r0
+ ldr r7, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r6, r7
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #1000
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1064]
+ add r11, sp, #1000
+ ldr r6, [sp, #1024]
+ ldr r5, [sp, #1020]
+ ldr r8, [sp, #1016]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1060]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1032]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1028]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r4, [sp, #1012]
+ ldr r2, [r0, #40]
+ add r0, sp, #928
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ add lr, sp, #944
+ adds r0, r0, r9
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r2, r0, r10
+ ldr r0, [sp, #112] @ 4-byte Reload
+ add r10, sp, #968
+ adcs r0, r0, r11
+ ldr r11, [sp, #932]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #940]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #928]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #936]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #52] @ 4-byte Spill
+ adds r0, r2, r5
+ mul r1, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #992]
+ str r1, [sp, #44] @ 4-byte Spill
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #988]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #984]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r7, r7, r11
+ str r7, [sp, #112] @ 4-byte Spill
+ ldr r7, [sp, #108] @ 4-byte Reload
+ adcs r6, r7, r6
+ str r6, [sp, #108] @ 4-byte Spill
+ ldr r6, [sp, #104] @ 4-byte Reload
+ adcs r6, r6, r8
+ str r6, [sp, #104] @ 4-byte Spill
+ ldr r6, [sp, #100] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, sp, #856
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #920]
+ add r11, sp, #856
+ ldr r4, [sp, #884]
+ ldr r7, [sp, #880]
+ ldr r5, [sp, #876]
+ ldr r6, [sp, #872]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #916]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #912]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #908]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #904]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #900]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #896]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #888]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #784
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #116] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #800
+ adds r0, r0, r8
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r2, r0, r9
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #824
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #788]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #792]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #796]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #784]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, r1
+ adds r1, r2, r4
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ str r1, [sp, #116] @ 4-byte Spill
+ mul r2, r1, r0
+ ldr r0, [sp, #848]
+ str r2, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #844]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #840]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r4, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #112] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #68] @ 4-byte Spill
+ ldr r6, [sp, #108] @ 4-byte Reload
+ adcs r5, r6, r5
+ str r5, [sp, #64] @ 4-byte Spill
+ ldr r5, [sp, #104] @ 4-byte Reload
+ adcs r5, r5, r7
+ str r5, [sp, #60] @ 4-byte Spill
+ ldr r5, [sp, #100] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, sp, #712
+ bl .LmulPv512x32(PLT)
+ ldr r1, [sp, #776]
+ ldr r11, [sp, #740]
+ ldr r8, [sp, #736]
+ ldr r9, [sp, #732]
+ ldr r10, [sp, #728]
+ ldr r6, [sp, #712]
+ ldr r7, [sp, #716]
+ ldr r5, [sp, #720]
+ ldr r4, [sp, #724]
+ add r0, sp, #640
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #772]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #768]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #764]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #760]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #756]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #752]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #748]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #744]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #116] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #644
+ adds r0, r0, r6
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #668
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #688]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #640]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #116] @ 4-byte Reload
+ ldr r7, [sp, #68] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #568
+ bl .LmulPv512x32(PLT)
+ ldr r1, [sp, #632]
+ ldr r6, [sp, #596]
+ ldr r7, [sp, #592]
+ ldr r8, [sp, #588]
+ ldr r5, [sp, #584]
+ ldr r9, [sp, #568]
+ ldr r10, [sp, #572]
+ ldr r4, [sp, #576]
+ ldr r11, [sp, #580]
+ add r0, sp, #496
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #628]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #624]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #620]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #616]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #612]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #608]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #604]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #600]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [r1, #52]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #500
+ adds r0, r0, r9
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #524
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #560]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #552]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #548]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #544]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #496]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #72] @ 4-byte Reload
+ ldr r7, [sp, #68] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #424
+ bl .LmulPv512x32(PLT)
+ ldr r1, [sp, #488]
+ ldr r6, [sp, #452]
+ ldr r7, [sp, #448]
+ ldr r8, [sp, #444]
+ ldr r5, [sp, #440]
+ ldr r9, [sp, #424]
+ ldr r10, [sp, #428]
+ ldr r4, [sp, #432]
+ ldr r11, [sp, #436]
+ add r0, sp, #352
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #484]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #480]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #476]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #472]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #468]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #464]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #460]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #456]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [r1, #56]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #356
+ adds r0, r0, r9
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #380
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #404]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #400]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r6, [sp, #352]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #72] @ 4-byte Reload
+ ldr r7, [sp, #68] @ 4-byte Reload
+ adds r6, r11, r6
+ adcs r0, r7, r0
+ str r6, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r2, r6, r0
+ add r0, sp, #280
+ bl .LmulPv512x32(PLT)
+ ldr r1, [sp, #344]
+ add r11, sp, #284
+ ldr r8, [sp, #308]
+ ldr r9, [sp, #304]
+ ldr r10, [sp, #300]
+ ldr r7, [sp, #280]
+ add r0, sp, #208
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #340]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #336]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #332]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #328]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #324]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #320]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #316]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #312]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r11}
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [r1, #60]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #68] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #224
+ adds r0, r0, r7
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r4
+ adcs r1, r1, r5
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #248
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r1, r1, r8
+ add r8, sp, #208
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #96] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adc r1, r1, r2
+ str r1, [sp, #76] @ 4-byte Spill
+ ldm r8, {r4, r5, r6, r8}
+ adds r9, r0, r4
+ ldr r0, [sp, #120] @ 4-byte Reload
+ mul r1, r9, r0
+ ldr r0, [sp, #272]
+ str r1, [sp, #60] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #268]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #264]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r10, {r4, r7, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #128] @ 4-byte Reload
+ adcs r11, r11, r5
+ ldr r5, [sp, #124] @ 4-byte Reload
+ adcs r6, r5, r6
+ ldr r5, [sp, #72] @ 4-byte Reload
+ adcs r8, r5, r8
+ ldr r5, [sp, #68] @ 4-byte Reload
+ adcs r5, r5, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r7
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r10, r0, #0
+ add r0, sp, #136
+ bl .LmulPv512x32(PLT)
+ add r3, sp, #136
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r9, r0
+ ldr r0, [sp, #152]
+ adcs r4, r11, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r6, r6, r2
+ str r4, [sp, #48] @ 4-byte Spill
+ adcs lr, r8, r3
+ mov r3, r7
+ str r6, [sp, #56] @ 4-byte Spill
+ str lr, [sp, #60] @ 4-byte Spill
+ adcs r5, r5, r0
+ ldr r0, [sp, #156]
+ str r5, [sp, #68] @ 4-byte Spill
+ adcs r9, r1, r0
+ ldr r0, [sp, #160]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #164]
+ adcs r0, r1, r0
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #168]
+ adcs r0, r1, r0
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #172]
+ adcs r0, r1, r0
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #176]
+ adcs r0, r1, r0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #180]
+ adcs r0, r1, r0
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #184]
+ adcs r0, r1, r0
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ adcs r0, r1, r0
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ adcs r0, r1, r0
+ ldr r1, [sp, #104] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #196]
+ adcs r0, r1, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #200]
+ adc r10, r10, r0
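+@ compute (result - modulus) limb by limb; the movlt selects below keep the
+@ unsubtracted limbs when the subtraction borrows (r1 < 0)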
+ ldm r3, {r0, r7}
+ ldr r1, [r3, #8]
+ ldr r2, [r3, #12]
+ subs r12, r4, r0
+ ldr r0, [r3, #32]
+ sbcs r4, r6, r7
+ ldr r7, [r3, #60]
+ sbcs r6, lr, r1
+ add lr, r3, #16
+ ldr r1, [r3, #28]
+ sbcs r8, r5, r2
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldm lr, {r0, r5, lr}
+ ldr r11, [sp, #84] @ 4-byte Reload
+ sbcs r2, r9, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ sbcs r3, r0, r5
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs lr, r11, lr
+ sbcs r5, r0, r1
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #64] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ sbcs r0, r0, r1
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ sbc r1, r10, r7
+ ldr r7, [sp, #56] @ 4-byte Reload
+ cmp r1, #0
+ movlt r12, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ movlt r4, r7
+ ldr r7, [sp, #52] @ 4-byte Reload
+ str r12, [r0]
+ str r4, [r0, #4]
+ ldr r4, [sp, #60] @ 4-byte Reload
+ ldr r12, [sp, #64] @ 4-byte Reload
+ movlt r6, r4
+ cmp r1, #0
+ ldr r4, [sp, #88] @ 4-byte Reload
+ str r6, [r0, #8]
+ ldr r6, [sp, #68] @ 4-byte Reload
+ movlt r2, r9
+ movlt r8, r6
+ ldr r6, [sp, #76] @ 4-byte Reload
+ str r8, [r0, #12]
+ str r2, [r0, #16]
+ ldr r2, [sp, #96] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ cmp r1, #0
+ movlt lr, r11
+ str r3, [r0, #20]
+ ldr r3, [sp, #132] @ 4-byte Reload
+ str lr, [r0, #24]
+ ldr lr, [sp, #72] @ 4-byte Reload
+ movlt r5, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ str r5, [r0, #28]
+ ldr r5, [sp, #80] @ 4-byte Reload
+ movlt r12, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ cmp r1, #0
+ str r12, [r0, #32]
+ movlt lr, r2
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str lr, [r0, #36]
+ movlt r6, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r6, [r0, #40]
+ movlt r5, r2
+ ldr r2, [sp, #128] @ 4-byte Reload
+ cmp r1, #0
+ str r5, [r0, #44]
+ movlt r4, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ str r4, [r0, #48]
+ movlt r3, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r3, [r0, #52]
+ movlt r7, r2
+ cmp r1, #0
+ movlt r1, r10
+ str r7, [r0, #56]
+ str r1, [r0, #60]
+ add sp, sp, #396
+ add sp, sp, #2048
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end243:
+ .size mcl_fp_montNF16L, .Lfunc_end243-mcl_fp_montNF16L
+ .cantunwind
+ .fnend
+
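+@ Montgomery reduction over 16 x 32-bit limbs: r0 receives the reduced (16-word) result,
+@ r1 points at the 32-word input, r2 at the 16-word modulus; the word stored just below
+@ the modulus ([r2, #-4]) supplies the per-round multiplier constant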
+ .globl mcl_fp_montRed16L
+ .align 2
+ .type mcl_fp_montRed16L,%function
+mcl_fp_montRed16L: @ @mcl_fp_montRed16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #356
+ sub sp, sp, #356
+ .pad #1024
+ sub sp, sp, #1024
+ mov r3, r2
+ str r0, [sp, #200] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r7, [r1]
+ add r10, sp, #1024
+ ldr r0, [r3]
+ str r3, [sp, #216] @ 4-byte Spill
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #192] @ 4-byte Spill
+ ldr r0, [r3, #4]
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #188] @ 4-byte Spill
+ ldr r0, [r3, #8]
+ str r2, [sp, #104] @ 4-byte Spill
+ str r0, [sp, #184] @ 4-byte Spill
+ ldr r0, [r3, #12]
+ str r0, [sp, #168] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ str r0, [sp, #172] @ 4-byte Spill
+ ldr r0, [r3, #20]
+ str r0, [sp, #176] @ 4-byte Spill
+ ldr r0, [r3, #24]
+ str r0, [sp, #180] @ 4-byte Spill
+ ldr r0, [r3, #-4]
+ str r0, [sp, #220] @ 4-byte Spill
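+@ first reduction multiplier: low input word (r7) times the constant loaded from [r3, #-4]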
+ mul r2, r7, r0
+ ldr r0, [r3, #60]
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #144] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #148] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #152] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #156] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #160] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #164] @ 4-byte Spill
+ ldr r0, [r3, #28]
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [r1, #96]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [r1, #100]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [r1, #104]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [r1, #108]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [r1, #112]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [r1, #116]
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [r1, #120]
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [r1, #124]
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #72]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r1, #76]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #80]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #84]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r1, #88]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r1, #92]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r1, #20]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ mov r1, r3
+ str r0, [sp, #16] @ 4-byte Spill
+ add r0, r10, #280
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1368]
+ ldr r10, [sp, #1304]
+ ldr r1, [sp, #1312]
+ ldr r2, [sp, #1316]
+ ldr r3, [sp, #1320]
+ ldr r12, [sp, #1324]
+ ldr lr, [sp, #1328]
+ ldr r4, [sp, #1332]
+ ldr r5, [sp, #1336]
+ ldr r6, [sp, #1340]
+ ldr r8, [sp, #1344]
+ ldr r9, [sp, #1348]
+ ldr r11, [sp, #1352]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1364]
+ adds r7, r7, r10
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1360]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1356]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1308]
+ adcs r10, r7, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #16] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ mul r2, r10, r0
+ add r0, sp, #1232
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1296]
+ ldr r4, [sp, #1232]
+ ldr r1, [sp, #1240]
+ ldr r2, [sp, #1244]
+ ldr r3, [sp, #1248]
+ ldr r9, [sp, #1252]
+ ldr r12, [sp, #1256]
+ ldr r11, [sp, #1260]
+ ldr lr, [sp, #1264]
+ ldr r6, [sp, #1268]
+ ldr r7, [sp, #1272]
+ ldr r8, [sp, #1276]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1292]
+ adds r4, r10, r4
+ ldr r4, [sp, #112] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1288]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1284]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #1236]
+ adcs r10, r4, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r4, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ mov r11, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #1024
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #24] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ add r0, r8, #136
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1224]
+ add r12, sp, #1168
+ ldr r9, [sp, #1204]
+ ldr r7, [sp, #1200]
+ ldr r6, [sp, #1196]
+ ldr r5, [sp, #1192]
+ ldr lr, [sp, #1188]
+ ldr r10, [sp, #1184]
+ ldr r8, [sp, #1164]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1220]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1216]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1212]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1208]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1160]
+ ldm r12, {r1, r2, r3, r12}
+ adds r0, r11, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r8, r0, r8
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r8, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ mov r10, r8
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #1088
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1152]
+ add r9, sp, #1120
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1140]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r9, {r6, r7, r8, r9}
+ ldr r4, [sp, #1088]
+ ldr r0, [sp, #1092]
+ ldr r1, [sp, #1096]
+ ldr r2, [sp, #1100]
+ ldr r3, [sp, #1104]
+ ldr r12, [sp, #1108]
+ ldr lr, [sp, #1112]
+ ldr r11, [sp, #1116]
+ adds r4, r10, r4
+ ldr r4, [sp, #112] @ 4-byte Reload
+ adcs r10, r4, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r4, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ mov r8, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, sp, #1016
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1080]
+ add r11, sp, #1016
+ ldr r6, [sp, #1060]
+ ldr r7, [sp, #1056]
+ ldr r5, [sp, #1052]
+ ldr lr, [sp, #1048]
+ ldr r12, [sp, #1044]
+ ldr r10, [sp, #1040]
+ ldr r9, [sp, #1036]
+ ldr r3, [sp, #1032]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1076]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1072]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1068]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1064]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r11, {r0, r1, r2, r11}
+ adds r0, r8, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r1, r0, r1
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ mov r10, r1
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r1, r4
+ mov r1, r5
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #944
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #1008]
+ add r9, sp, #976
+ add lr, sp, #948
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1004]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1000]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #996]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #992]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r9, {r6, r7, r8, r9}
+ ldr r4, [sp, #944]
+ ldr r11, [sp, #972]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r4, r10, r4
+ ldr r4, [sp, #112] @ 4-byte Reload
+ adcs r10, r4, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r4, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r10, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r11
+ mov r11, r10
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, sp, #872
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #936]
+ add lr, sp, #888
+ add r8, sp, #872
+ ldr r6, [sp, #916]
+ ldr r7, [sp, #912]
+ ldr r5, [sp, #908]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #932]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #928]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #924]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #920]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm lr, {r3, r9, r10, r12, lr}
+ ldm r8, {r0, r1, r2, r8}
+ adds r0, r11, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, sp, #800
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #864]
+ add r10, sp, #828
+ add lr, sp, #804
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #860]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #856]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #852]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #848]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldr r4, [sp, #800]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r4, r11, r4
+ ldr r4, [sp, #112] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r4, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #728
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #792]
+ add r8, sp, #760
+ add lr, sp, #748
+ add r12, sp, #728
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #788]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #784]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #780]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #776]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r8, {r5, r6, r7, r8}
+ ldm lr, {r9, r10, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ adds r0, r11, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #656
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #720]
+ add r10, sp, #684
+ add lr, sp, #660
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #716]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #712]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldr r4, [sp, #656]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r4, r11, r4
+ ldr r4, [sp, #112] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r4, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #584
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #648]
+ add r8, sp, #616
+ add lr, sp, #604
+ add r12, sp, #584
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #644]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #640]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #636]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #632]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r8, {r5, r6, r7, r8}
+ ldm lr, {r9, r10, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ adds r0, r11, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ add r0, sp, #512
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #576]
+ add r10, sp, #540
+ add lr, sp, #516
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #572]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #568]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #564]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #560]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldr r4, [sp, #512]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r4, r11, r4
+ ldr r4, [sp, #112] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r4, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, sp, #440
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #504]
+ add r8, sp, #472
+ add lr, sp, #460
+ add r12, sp, #440
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #500]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #496]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #492]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #488]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldm r8, {r5, r6, r7, r8}
+ ldm lr, {r9, r10, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ adds r0, r11, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #212] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, sp, #368
+ bl .LmulPv512x32(PLT)
+ ldr r0, [sp, #432]
+ add r10, sp, #396
+ add lr, sp, #372
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #428]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #424]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #420]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldr r4, [sp, #368]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r4, r11, r4
+ ldr r4, [sp, #112] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #220] @ 4-byte Reload
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ mul r2, r4, r6
+ adcs r0, r0, r7
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ add r0, sp, #296
+ bl .LmulPv512x32(PLT)
+ add r5, sp, #296
+ add r7, sp, #336
+ add lr, sp, #312
+ ldm r5, {r0, r1, r3, r5}
+ ldr r9, [sp, #356]
+ adds r0, r4, r0
+ adcs r8, r11, r1
+ ldr r11, [sp, #352]
+ mul r0, r8, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #360]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #348]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldm r7, {r4, r6, r7}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r10, [sp, #212] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #208] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #204] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r10, r0, r11
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ add r0, sp, #224
+ bl .LmulPv512x32(PLT)
+ add r3, sp, #224
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r8, r0
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r12, r0, r1
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r12, [sp, #92] @ 4-byte Spill
+ adcs r2, r0, r2
+ ldr r0, [sp, #52] @ 4-byte Reload
+ str r2, [sp, #96] @ 4-byte Spill
+ adcs lr, r0, r3
+ ldr r0, [sp, #240]
+ str lr, [sp, #100] @ 4-byte Spill
+ adcs r4, r1, r0
+ ldr r0, [sp, #244]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r4, [sp, #104] @ 4-byte Spill
+ adcs r5, r1, r0
+ ldr r0, [sp, #248]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r5, [sp, #108] @ 4-byte Spill
+ adcs r7, r1, r0
+ ldr r0, [sp, #252]
+ ldr r1, [sp, #208] @ 4-byte Reload
+ str r7, [sp, #112] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #204] @ 4-byte Reload
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [sp, #256]
+ adcs r0, r1, r0
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ adcs r11, r1, r0
+ ldr r0, [sp, #264]
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r11, [sp, #116] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #212] @ 4-byte Reload
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #268]
+ adcs r0, r1, r0
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ adcs r0, r1, r0
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #276]
+ adcs r10, r10, r0
+ ldr r0, [sp, #280]
+ str r10, [sp, #128] @ 4-byte Spill
+ adcs r8, r1, r0
+ ldr r0, [sp, #284]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r8, [sp, #132] @ 4-byte Spill
+ adcs r6, r6, r0
+ ldr r0, [sp, #288]
+ adcs r3, r1, r0
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #192] @ 4-byte Reload
+ subs r1, r12, r0
+ ldr r0, [sp, #188] @ 4-byte Reload
+ sbcs r2, r2, r0
+ ldr r0, [sp, #184] @ 4-byte Reload
+ sbcs r12, lr, r0
+ ldr r0, [sp, #168] @ 4-byte Reload
+ sbcs lr, r4, r0
+ ldr r0, [sp, #172] @ 4-byte Reload
+ sbcs r4, r5, r0
+ ldr r0, [sp, #176] @ 4-byte Reload
+ sbcs r5, r7, r0
+ ldr r0, [sp, #180] @ 4-byte Reload
+ ldr r7, [sp, #208] @ 4-byte Reload
+ sbcs r9, r7, r0
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r7, [sp, #204] @ 4-byte Reload
+ sbcs r0, r7, r0
+ ldr r7, [sp, #212] @ 4-byte Reload
+ str r0, [sp, #172] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ sbcs r0, r11, r0
+ ldr r11, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #176] @ 4-byte Spill
+ ldr r0, [sp, #144] @ 4-byte Reload
+ sbcs r0, r11, r0
+ ldr r11, [sp, #220] @ 4-byte Reload
+ str r0, [sp, #180] @ 4-byte Spill
+ ldr r0, [sp, #148] @ 4-byte Reload
+ sbcs r0, r7, r0
+ str r0, [sp, #184] @ 4-byte Spill
+ ldr r0, [sp, #152] @ 4-byte Reload
+ sbcs r0, r11, r0
+ str r0, [sp, #188] @ 4-byte Spill
+ ldr r0, [sp, #156] @ 4-byte Reload
+ sbcs r0, r10, r0
+ mov r10, r6
+ str r0, [sp, #192] @ 4-byte Spill
+ ldr r0, [sp, #160] @ 4-byte Reload
+ sbcs r7, r8, r0
+ ldr r0, [sp, #164] @ 4-byte Reload
+ mov r8, r3
+ sbcs r11, r6, r0
+ ldr r0, [sp, #196] @ 4-byte Reload
+ sbcs r6, r3, r0
+ ldr r0, [sp, #124] @ 4-byte Reload
+ sbc r3, r0, #0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ands r3, r3, #1
+ movne r1, r0
+ ldr r0, [sp, #200] @ 4-byte Reload
+ str r1, [r0]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #100] @ 4-byte Reload
+ str r2, [r0, #4]
+ ldr r2, [sp, #172] @ 4-byte Reload
+ movne r12, r1
+ ldr r1, [sp, #104] @ 4-byte Reload
+ cmp r3, #0
+ str r12, [r0, #8]
+ movne lr, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str lr, [r0, #12]
+ movne r4, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ str r4, [r0, #16]
+ movne r5, r1
+ ldr r1, [sp, #208] @ 4-byte Reload
+ cmp r3, #0
+ str r5, [r0, #20]
+ movne r9, r1
+ ldr r1, [sp, #204] @ 4-byte Reload
+ str r9, [r0, #24]
+ movne r2, r1
+ ldr r1, [sp, #176] @ 4-byte Reload
+ str r2, [r0, #28]
+ ldr r2, [sp, #116] @ 4-byte Reload
+ movne r1, r2
+ cmp r3, #0
+ ldr r2, [sp, #180] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #216] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #212] @ 4-byte Reload
+ str r2, [r0, #36]
+ ldr r2, [sp, #184] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #220] @ 4-byte Reload
+ str r2, [r0, #40]
+ ldr r2, [sp, #188] @ 4-byte Reload
+ movne r2, r1
+ cmp r3, #0
+ ldr r1, [sp, #192] @ 4-byte Reload
+ str r2, [r0, #44]
+ ldr r2, [sp, #128] @ 4-byte Reload
+ movne r11, r10
+ movne r1, r2
+ str r1, [r0, #48]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ movne r7, r1
+ cmp r3, #0
+ movne r6, r8
+ str r7, [r0, #52]
+ str r11, [r0, #56]
+ str r6, [r0, #60]
+ add sp, sp, #356
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end244:
+ .size mcl_fp_montRed16L, .Lfunc_end244-mcl_fp_montRed16L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre16L
+ .align 2
+ .type mcl_fp_addPre16L,%function
+mcl_fp_addPre16L: @ @mcl_fp_addPre16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #64
+ sub sp, sp, #64
+ ldm r1, {r3, r8}
+ ldr r5, [r1, #8]
+ ldr r6, [r1, #12]
+ ldm r2, {r7, r12, lr}
+ ldr r4, [r2, #12]
+ ldr r9, [r1, #32]
+ ldr r11, [r1, #52]
+ adds r3, r7, r3
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r2, #32]
+ adcs r7, r12, r8
+ ldr r8, [r2, #24]
+ add r12, r1, #16
+ adcs r5, lr, r5
+ ldr lr, [r2, #16]
+ adcs r6, r4, r6
+ ldr r4, [r2, #20]
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2, #36]
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [r2, #44]
+ str r3, [sp, #44] @ 4-byte Spill
+ ldr r3, [r2, #48]
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [r2, #52]
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [r2, #56]
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [r2, #60]
+ str r3, [sp, #60] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ ldr r2, [r1, #36]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r3, [sp, #24] @ 4-byte Spill
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldm r12, {r1, r2, r3, r12}
+ ldr r10, [sp, #28] @ 4-byte Reload
+ adcs r1, lr, r1
+ str r10, [r0]
+ str r7, [r0, #4]
+ str r5, [r0, #8]
+ str r6, [r0, #12]
+ adcs r2, r4, r2
+ str r1, [r0, #16]
+ str r2, [r0, #20]
+ adcs r1, r8, r3
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adcs r2, r2, r12
+ adcs r12, r1, r9
+ str r2, [r0, #28]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ ldr r2, [sp] @ 4-byte Reload
+ str r12, [r0, #32]
+ adcs lr, r1, r2
+ ldr r1, [sp, #40] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str lr, [r0, #36]
+ adcs r3, r1, r2
+ ldr r1, [sp, #44] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r3, [r0, #40]
+ adcs r7, r1, r2
+ ldr r1, [sp, #48] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r7, [r0, #44]
+ adcs r6, r1, r2
+ ldr r1, [sp, #52] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r6, [r0, #48]
+ adcs r5, r1, r11
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r5, [r0, #52]
+ adcs r4, r1, r2
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r4, [r0, #56]
+ adcs r1, r1, r2
+ str r1, [r0, #60]
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #64
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end245:
+ .size mcl_fp_addPre16L, .Lfunc_end245-mcl_fp_addPre16L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subPre16L
+ .align 2
+ .type mcl_fp_subPre16L,%function
+mcl_fp_subPre16L: @ @mcl_fp_subPre16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #64
+ sub sp, sp, #64
+ ldm r2, {r3, r8}
+ ldr r5, [r2, #8]
+ ldr r6, [r2, #12]
+ ldm r1, {r7, r12, lr}
+ ldr r4, [r1, #12]
+ ldr r9, [r1, #32]
+ ldr r11, [r1, #52]
+ subs r3, r7, r3
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r2, #32]
+ sbcs r7, r12, r8
+ ldr r8, [r2, #24]
+ add r12, r1, #16
+ sbcs r5, lr, r5
+ ldr lr, [r2, #16]
+ sbcs r6, r4, r6
+ ldr r4, [r2, #20]
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2, #36]
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [r2, #44]
+ str r3, [sp, #44] @ 4-byte Spill
+ ldr r3, [r2, #48]
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [r2, #52]
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [r2, #56]
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [r2, #60]
+ str r3, [sp, #60] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ ldr r2, [r1, #36]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r3, [sp, #24] @ 4-byte Spill
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldm r12, {r1, r2, r3, r12}
+ ldr r10, [sp, #28] @ 4-byte Reload
+ sbcs r1, r1, lr
+ str r10, [r0]
+ str r7, [r0, #4]
+ str r5, [r0, #8]
+ str r6, [r0, #12]
+ sbcs r2, r2, r4
+ str r1, [r0, #16]
+ str r2, [r0, #20]
+ sbcs r1, r3, r8
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #32] @ 4-byte Reload
+ sbcs r2, r12, r2
+ sbcs r12, r9, r1
+ str r2, [r0, #28]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ ldr r2, [sp] @ 4-byte Reload
+ str r12, [r0, #32]
+ sbcs lr, r2, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str lr, [r0, #36]
+ sbcs r3, r2, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ ldr r2, [sp, #8] @ 4-byte Reload
+ str r3, [r0, #40]
+ sbcs r7, r2, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r7, [r0, #44]
+ sbcs r6, r2, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r6, [r0, #48]
+ sbcs r5, r11, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r5, [r0, #52]
+ sbcs r4, r2, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r4, [r0, #56]
+ sbcs r1, r2, r1
+ str r1, [r0, #60]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #64
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end246:
+ .size mcl_fp_subPre16L, .Lfunc_end246-mcl_fp_subPre16L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_16L
+ .align 2
+ .type mcl_fp_shr1_16L,%function
+mcl_fp_shr1_16L: @ @mcl_fp_shr1_16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #24
+ sub sp, sp, #24
+ ldr r3, [r1, #16]
+ ldr r2, [r1, #20]
+ ldr r12, [r1, #24]
+ ldr r11, [r1, #28]
+ ldm r1, {r4, r5, r6, r7}
+ ldr r8, [r1, #56]
+ ldr lr, [r1, #32]
+ ldr r9, [r1, #36]
+ ldr r10, [r1, #40]
+ str r4, [sp, #4] @ 4-byte Spill
+ lsr r4, r5, #1
+ str r8, [sp, #16] @ 4-byte Spill
+ orr r4, r4, r6, lsl #31
+ str r4, [sp] @ 4-byte Spill
+ ldr r4, [r1, #44]
+ str r4, [sp, #8] @ 4-byte Spill
+ ldr r4, [r1, #48]
+ str r4, [sp, #12] @ 4-byte Spill
+ ldr r4, [r1, #52]
+ ldr r1, [r1, #60]
+ str r1, [sp, #20] @ 4-byte Spill
+ lsr r1, r7, #1
+ lsrs r7, r7, #1
+ rrx r6, r6
+ lsrs r5, r5, #1
+ orr r1, r1, r3, lsl #31
+ ldr r5, [sp, #4] @ 4-byte Reload
+ rrx r5, r5
+ str r5, [r0]
+ ldr r5, [sp] @ 4-byte Reload
+ stmib r0, {r5, r6}
+ str r1, [r0, #12]
+ lsrs r1, r2, #1
+ rrx r1, r3
+ str r1, [r0, #16]
+ lsr r1, r2, #1
+ lsr r2, r9, #1
+ orr r1, r1, r12, lsl #31
+ str r1, [r0, #20]
+ lsrs r1, r11, #1
+ rrx r1, r12
+ str r1, [r0, #24]
+ lsr r1, r11, #1
+ orr r1, r1, lr, lsl #31
+ str r1, [r0, #28]
+ lsrs r1, r9, #1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ rrx r12, lr
+ orr lr, r2, r10, lsl #31
+ mov r2, r4
+ lsr r5, r2, #1
+ str r12, [r0, #32]
+ str lr, [r0, #36]
+ lsrs r3, r1, #1
+ lsr r7, r1, #1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ rrx r3, r10
+ lsrs r6, r2, #1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r3, [r0, #40]
+ orr r7, r7, r1, lsl #31
+ rrx r6, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ orr r5, r5, r2, lsl #31
+ str r7, [r0, #44]
+ str r6, [r0, #48]
+ str r5, [r0, #52]
+ lsrs r4, r1, #1
+ lsr r1, r1, #1
+ rrx r4, r2
+ str r4, [r0, #56]
+ str r1, [r0, #60]
+ add sp, sp, #24
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end247:
+ .size mcl_fp_shr1_16L, .Lfunc_end247-mcl_fp_shr1_16L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add16L
+ .align 2
+ .type mcl_fp_add16L,%function
+mcl_fp_add16L: @ @mcl_fp_add16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #60
+ sub sp, sp, #60
+ ldr r9, [r1]
+ ldmib r1, {r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r7}
+ adds r9, r4, r9
+ ldr r4, [r1, #24]
+ adcs r5, r5, r8
+ mov r8, r9
+ adcs r6, r6, lr
+ str r5, [sp, #36] @ 4-byte Spill
+ ldr r5, [r1, #20]
+ str r8, [r0]
+ adcs r10, r7, r12
+ str r6, [sp, #32] @ 4-byte Spill
+ ldr r6, [r1, #16]
+ ldr r7, [r2, #16]
+ ldr lr, [sp, #36] @ 4-byte Reload
+ str r10, [sp] @ 4-byte Spill
+ adcs r7, r7, r6
+ ldr r6, [r1, #28]
+ str lr, [r0, #4]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r9, [sp, #28] @ 4-byte Reload
+ adcs r7, r7, r5
+ ldr r5, [r2, #28]
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ adcs r7, r7, r4
+ ldr r4, [r2, #32]
+ str r7, [sp, #52] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r5, [r1, #32]
+ str r7, [sp, #40] @ 4-byte Spill
+ adcs r7, r4, r5
+ ldr r5, [r1, #36]
+ ldr r4, [r2, #36]
+ str r7, [sp, #48] @ 4-byte Spill
+ adcs r7, r4, r5
+ ldr r5, [r1, #40]
+ ldr r4, [r2, #40]
+ str r7, [sp, #56] @ 4-byte Spill
+ adcs r7, r4, r5
+ ldr r5, [r1, #44]
+ ldr r4, [r2, #44]
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ adcs r11, r4, r5
+ ldr r4, [r1, #48]
+ ldr r5, [r2, #52]
+ str r11, [sp, #20] @ 4-byte Spill
+ adcs r12, r7, r4
+ ldr r7, [r1, #52]
+ ldr r4, [sp, #32] @ 4-byte Reload
+ str r12, [sp, #16] @ 4-byte Spill
+ adcs r6, r5, r7
+ ldr r7, [r1, #56]
+ ldr r5, [r2, #56]
+ ldr r1, [r1, #60]
+ ldr r2, [r2, #60]
+ str r4, [r0, #8]
+ str r10, [r0, #12]
+ ldr r10, [sp, #24] @ 4-byte Reload
+ str r9, [r0, #16]
+ str r6, [sp, #4] @ 4-byte Spill
+ adcs r5, r5, r7
+ str r10, [r0, #20]
+ add r7, r0, #40
+ adcs r2, r2, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r2, [sp, #8] @ 4-byte Spill
+ str r1, [r0, #24]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r1, [r0, #28]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r1, [r0, #36]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ stm r7, {r1, r11, r12}
+ str r6, [r0, #52]
+ str r5, [r0, #56]
+ str r2, [r0, #60]
+ mov r2, #0
+ mov r12, r5
+ add r11, r3, #32
+ adc r1, r2, #0
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r3, {r5, r7}
+ ldr r1, [r3, #8]
+ ldr r2, [r3, #12]
+ subs r8, r8, r5
+ sbcs lr, lr, r7
+ sbcs r1, r4, r1
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp] @ 4-byte Reload
+ sbcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ sbcs r1, r9, r1
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ sbcs r1, r10, r1
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [r3, #24]
+ sbcs r1, r2, r1
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ sbcs r1, r2, r1
+ str r1, [sp, #40] @ 4-byte Spill
+ ldm r11, {r1, r2, r5, r7, r9, r10, r11}
+ ldr r6, [sp, #48] @ 4-byte Reload
+ ldr r3, [r3, #60]
+ sbcs r1, r6, r1
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ sbcs r1, r1, r2
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #44] @ 4-byte Reload
+ sbcs r2, r1, r5
+ ldr r1, [sp, #20] @ 4-byte Reload
+ sbcs r5, r1, r7
+ ldr r1, [sp, #16] @ 4-byte Reload
+ sbcs r7, r1, r9
+ ldr r1, [sp, #4] @ 4-byte Reload
+ sbcs r4, r1, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ sbcs r6, r12, r11
+ sbcs r1, r1, r3
+ ldr r3, [sp, #12] @ 4-byte Reload
+ sbc r3, r3, #0
+ tst r3, #1
+ bne .LBB248_2
+@ BB#1: @ %nocarry
+ stm r0, {r8, lr}
+ ldr r3, [sp, #36] @ 4-byte Reload
+ str r3, [r0, #8]
+ ldr r3, [sp, #32] @ 4-byte Reload
+ str r3, [r0, #12]
+ ldr r3, [sp, #28] @ 4-byte Reload
+ str r3, [r0, #16]
+ ldr r3, [sp, #24] @ 4-byte Reload
+ str r3, [r0, #20]
+ ldr r3, [sp, #52] @ 4-byte Reload
+ str r3, [r0, #24]
+ ldr r3, [sp, #40] @ 4-byte Reload
+ str r3, [r0, #28]
+ ldr r3, [sp, #48] @ 4-byte Reload
+ str r3, [r0, #32]
+ ldr r3, [sp, #56] @ 4-byte Reload
+ str r3, [r0, #36]
+ add r3, r0, #40
+ stm r3, {r2, r5, r7}
+ str r4, [r0, #52]
+ str r6, [r0, #56]
+ str r1, [r0, #60]
+.LBB248_2: @ %carry
+ add sp, sp, #60
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end248:
+ .size mcl_fp_add16L, .Lfunc_end248-mcl_fp_add16L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF16L
+ .align 2
+ .type mcl_fp_addNF16L,%function
+mcl_fp_addNF16L: @ @mcl_fp_addNF16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #88
+ sub sp, sp, #88
+ mov r12, r0
+ ldm r1, {r0, r9}
+ ldr r8, [r1, #8]
+ ldr lr, [r1, #12]
+ ldm r2, {r4, r5, r6, r7}
+ adds r10, r4, r0
+ ldr r4, [r1, #20]
+ ldr r0, [r1, #24]
+ adcs r9, r5, r9
+ ldr r5, [r1, #16]
+ adcs r8, r6, r8
+ str r9, [sp, #4] @ 4-byte Spill
+ adcs r6, r7, lr
+ ldr r7, [r2, #16]
+ str r8, [sp, #8] @ 4-byte Spill
+ str r6, [sp, #16] @ 4-byte Spill
+ adcs r7, r7, r5
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ adcs r7, r7, r4
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ adcs r0, r7, r0
+ ldr r7, [r2, #28]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ adcs r0, r7, r0
+ ldr r7, [r2, #32]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ adcs r0, r7, r0
+ ldr r7, [r2, #36]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ adcs r0, r7, r0
+ ldr r7, [r2, #40]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ adcs r0, r7, r0
+ ldr r7, [r2, #44]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ adcs r0, r7, r0
+ ldr r7, [r2, #48]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ adcs r0, r7, r0
+ ldr r7, [r2, #52]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ adcs r0, r7, r0
+ ldr r7, [r2, #56]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ adcs r0, r7, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ ldr r1, [r2, #60]
+ adc r11, r1, r0
+ ldm r3, {r0, r7}
+ ldr r1, [r3, #8]
+ ldr r4, [r3, #12]
+ subs lr, r10, r0
+ ldr r0, [r3, #32]
+ sbcs r5, r9, r7
+ ldr r9, [sp, #44] @ 4-byte Reload
+ sbcs r7, r8, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ sbcs r8, r6, r4
+ ldr r4, [r3, #24]
+ ldr r6, [r3, #20]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r3, #60]
+ str r0, [sp] @ 4-byte Spill
+ ldr r0, [r3, #28]
+ ldr r3, [r3, #16]
+ sbcs r1, r1, r3
+ ldr r3, [sp, #48] @ 4-byte Reload
+ sbcs r2, r9, r6
+ ldr r6, [sp, #12] @ 4-byte Reload
+ sbcs r3, r3, r4
+ ldr r4, [sp, #84] @ 4-byte Reload
+ sbcs r4, r4, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ sbcs r0, r0, r6
+ ldr r6, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ sbcs r0, r0, r6
+ ldr r6, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ sbcs r0, r0, r6
+ ldr r6, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ sbcs r0, r0, r6
+ ldr r6, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r0, r6
+ ldr r6, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ sbcs r0, r0, r6
+ ldr r6, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ sbcs r0, r0, r6
+ ldr r6, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp] @ 4-byte Reload
+ sbc r0, r11, r0
+ cmp r0, #0
+ movlt lr, r10
+ movlt r5, r6
+ ldr r6, [sp, #28] @ 4-byte Reload
+ str lr, [r12]
+ str r5, [r12, #4]
+ ldr r5, [sp, #8] @ 4-byte Reload
+ ldr lr, [sp, #12] @ 4-byte Reload
+ movlt r7, r5
+ cmp r0, #0
+ ldr r5, [sp, #32] @ 4-byte Reload
+ str r7, [r12, #8]
+ ldr r7, [sp, #16] @ 4-byte Reload
+ movlt r2, r9
+ movlt r8, r7
+ ldr r7, [sp, #52] @ 4-byte Reload
+ str r8, [r12, #12]
+ movlt r1, r7
+ cmp r0, #0
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r1, [r12, #16]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r2, [r12, #20]
+ ldr r2, [sp, #40] @ 4-byte Reload
+ movlt r3, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r3, [r12, #24]
+ ldr r3, [sp, #20] @ 4-byte Reload
+ movlt r4, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r4, [r12, #28]
+ ldr r4, [sp, #36] @ 4-byte Reload
+ movlt lr, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ cmp r0, #0
+ str lr, [r12, #32]
+ movlt r3, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r3, [r12, #36]
+ movlt r7, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r7, [r12, #40]
+ movlt r6, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ cmp r0, #0
+ str r6, [r12, #44]
+ movlt r5, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r5, [r12, #48]
+ movlt r4, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r4, [r12, #52]
+ movlt r2, r1
+ cmp r0, #0
+ movlt r0, r11
+ str r2, [r12, #56]
+ str r0, [r12, #60]
+ add sp, sp, #88
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end249:
+ .size mcl_fp_addNF16L, .Lfunc_end249-mcl_fp_addNF16L
+ .cantunwind
+ .fnend
+
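+@ Note (non-normative): mcl_fp_sub16L (r0=dst, r1=x, r2=y, r3=modulus) is a
+@ 512-bit modular subtraction. The limbs are subtracted with borrow and stored;
+@ if the final borrow is set, the %carry block adds the modulus back in.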
+ .globl mcl_fp_sub16L
+ .align 2
+ .type mcl_fp_sub16L,%function
+mcl_fp_sub16L: @ @mcl_fp_sub16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #84
+ sub sp, sp, #84
+ ldr r9, [r2]
+ ldmib r2, {r8, lr}
+ ldr r5, [r1]
+ ldr r12, [r2, #12]
+ ldmib r1, {r4, r6, r7}
+ subs r5, r5, r9
+ sbcs r4, r4, r8
+ str r5, [sp, #60] @ 4-byte Spill
+ ldr r5, [r2, #24]
+ sbcs r6, r6, lr
+ str r4, [sp, #64] @ 4-byte Spill
+ ldr r4, [r2, #20]
+ sbcs r7, r7, r12
+ str r6, [sp, #68] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ sbcs r7, r7, r6
+ ldr r6, [r1, #28]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r1, #20]
+ sbcs r7, r7, r4
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r1, #24]
+ sbcs r7, r7, r5
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ sbcs r11, r6, r7
+ ldr r7, [r2, #32]
+ ldr r6, [r1, #32]
+ sbcs r10, r6, r7
+ ldr r7, [r2, #36]
+ ldr r6, [r1, #36]
+ sbcs r8, r6, r7
+ ldr r7, [r2, #40]
+ ldr r6, [r1, #40]
+ str r8, [sp, #52] @ 4-byte Spill
+ sbcs r5, r6, r7
+ ldr r7, [r2, #44]
+ ldr r6, [r1, #44]
+ str r5, [sp, #48] @ 4-byte Spill
+ sbcs r4, r6, r7
+ ldr r6, [r2, #48]
+ ldr r7, [r1, #48]
+ str r4, [sp, #44] @ 4-byte Spill
+ sbcs lr, r7, r6
+ ldr r6, [r2, #52]
+ ldr r7, [r1, #52]
+ str lr, [sp, #40] @ 4-byte Spill
+ sbcs r9, r7, r6
+ ldr r6, [r2, #56]
+ ldr r7, [r1, #56]
+ ldr r2, [r2, #60]
+ ldr r1, [r1, #60]
+ sbcs r6, r7, r6
+ sbcs r12, r1, r2
+ ldr r1, [sp, #60] @ 4-byte Reload
+ mov r2, #0
+ str r6, [sp, #36] @ 4-byte Spill
+ sbc r2, r2, #0
+ str r12, [sp, #32] @ 4-byte Spill
+ tst r2, #1
+ str r1, [r0]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r1, [r0, #4]
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r1, [r0, #8]
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r1, [r0, #12]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r1, [r0, #20]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r1, [r0, #24]
+ str r11, [r0, #28]
+ str r10, [r0, #32]
+ str r8, [r0, #36]
+ str r5, [r0, #40]
+ str r4, [r0, #44]
+ str lr, [r0, #48]
+ str r9, [r0, #52]
+ str r6, [r0, #56]
+ str r12, [r0, #60]
+ beq .LBB250_2
+@ BB#1: @ %carry
+ ldr r2, [r3, #32]
+ ldr r8, [r3, #60]
+ str r11, [sp] @ 4-byte Spill
+ ldr r5, [r3]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r3, #36]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r3, #40]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r3, #44]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r3, #48]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r3, #52]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [r3, #56]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldmib r3, {r4, r11, r12}
+ ldr r1, [sp, #60] @ 4-byte Reload
+ ldr r7, [sp, #76] @ 4-byte Reload
+ ldr lr, [r3, #20]
+ ldr r2, [sp, #80] @ 4-byte Reload
+ adds r5, r5, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r5, [r0]
+ adcs r4, r4, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r4, [r0, #4]
+ adcs r1, r11, r1
+ ldr r11, [r3, #24]
+ adcs r6, r12, r7
+ str r1, [r0, #8]
+ ldr r12, [r3, #28]
+ ldr r3, [r3, #16]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r6, [r0, #12]
+ adcs r1, r3, r1
+ str r1, [r0, #16]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r3, lr, r2
+ ldr r2, [sp] @ 4-byte Reload
+ str r3, [r0, #20]
+ adcs r1, r11, r1
+ str r1, [r0, #24]
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r3, r12, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r3, [r0, #28]
+ ldr r3, [sp, #48] @ 4-byte Reload
+ adcs lr, r1, r10
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str lr, [r0, #32]
+ adcs r2, r1, r2
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r3, r1, r3
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r6, r1, r7
+ ldr r7, [sp, #40] @ 4-byte Reload
+ ldr r1, [sp, #20] @ 4-byte Reload
+ adcs r5, r1, r7
+ ldr r1, [sp, #24] @ 4-byte Reload
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adcs r4, r1, r9
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r7, r1, r7
+ ldr r1, [sp, #32] @ 4-byte Reload
+ adc r12, r8, r1
+ add r1, r0, #36
+ stm r1, {r2, r3, r6}
+ str r5, [r0, #48]
+ add r0, r0, #52
+ stm r0, {r4, r7, r12}
+.LBB250_2: @ %nocarry
+ add sp, sp, #84
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end250:
+ .size mcl_fp_sub16L, .Lfunc_end250-mcl_fp_sub16L
+ .cantunwind
+ .fnend
+
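+@ Note (non-normative): mcl_fp_subNF16L (r0=dst, r1=x, r2=y, r3=modulus) is the
+@ branch-free variant of the 512-bit modular subtraction: it computes x-y and
+@ (x-y)+modulus, then selects per limb with movge on the sign of the difference.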
+ .globl mcl_fp_subNF16L
+ .align 2
+ .type mcl_fp_subNF16L,%function
+mcl_fp_subNF16L: @ @mcl_fp_subNF16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #92
+ sub sp, sp, #92
+ ldr r7, [r2, #32]
+ add r9, r2, #8
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [r1, #60]
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [r1, #56]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r1, #52]
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r1, #48]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r1, #44]
+ str r7, [sp, #12] @ 4-byte Spill
+ ldm r2, {r10, r11}
+ ldm r9, {r5, r6, r7, r9}
+ ldr r4, [r2, #24]
+ ldr r2, [r2, #28]
+ str r4, [sp, #60] @ 4-byte Spill
+ str r2, [sp, #64] @ 4-byte Spill
+ ldm r1, {r2, r12, lr}
+ ldr r4, [r1, #12]
+ ldr r8, [r1, #40]
+ subs r2, r2, r10
+ str r2, [sp, #40] @ 4-byte Spill
+ sbcs r2, r12, r11
+ ldr r12, [r1, #36]
+ sbcs lr, lr, r5
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r5, [r1, #32]
+ ldr r2, [sp, #60] @ 4-byte Reload
+ sbcs r4, r4, r6
+ ldr r6, [r1, #16]
+ str lr, [sp] @ 4-byte Spill
+ str r4, [sp, #44] @ 4-byte Spill
+ sbcs r4, r6, r7
+ ldr r7, [r1, #20]
+ str r4, [sp, #52] @ 4-byte Spill
+ sbcs r4, r7, r9
+ ldr r7, [r1, #28]
+ ldr r1, [r1, #24]
+ str r4, [sp, #48] @ 4-byte Spill
+ sbcs r1, r1, r2
+ ldr r2, [sp, #12] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ sbcs r1, r7, r1
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ sbcs r1, r5, r1
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ sbcs r1, r12, r1
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ sbcs r1, r8, r1
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #36] @ 4-byte Reload
+ sbc r2, r2, r1
+ ldr r1, [r3, #32]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #8] @ 4-byte Spill
+ ldr r1, [r3, #56]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [r3, #60]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldm r3, {r1, r4, r5, r6, r7, r8, r9, r10}
+ ldr r3, [sp, #40] @ 4-byte Reload
+ ldr r11, [sp, #20] @ 4-byte Reload
+ adds r1, r3, r1
+ adcs r3, r11, r4
+ ldr r4, [sp, #52] @ 4-byte Reload
+ adcs r12, lr, r5
+ ldr r5, [sp, #44] @ 4-byte Reload
+ adcs lr, r5, r6
+ ldr r5, [sp, #48] @ 4-byte Reload
+ ldr r6, [sp, #60] @ 4-byte Reload
+ adcs r4, r4, r7
+ ldr r7, [sp, #64] @ 4-byte Reload
+ adcs r5, r5, r8
+ ldr r8, [sp, #88] @ 4-byte Reload
+ adcs r9, r6, r9
+ ldr r6, [sp, #12] @ 4-byte Reload
+ adcs r10, r7, r10
+ ldr r7, [sp, #68] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #16] @ 4-byte Reload
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [sp, #72] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #4] @ 4-byte Reload
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [sp, #76] @ 4-byte Reload
+ adcs r6, r7, r6
+ ldr r7, [sp, #80] @ 4-byte Reload
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [sp, #24] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #28] @ 4-byte Reload
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [sp, #84] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #40] @ 4-byte Reload
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r7, r8, r7
+ ldr r8, [sp, #32] @ 4-byte Reload
+ str r7, [sp, #8] @ 4-byte Spill
+ ldr r7, [sp, #56] @ 4-byte Reload
+ adcs r7, r7, r8
+ str r7, [sp, #32] @ 4-byte Spill
+ ldr r7, [sp, #36] @ 4-byte Reload
+ adc r7, r2, r7
+ cmp r2, #0
+ movge r1, r6
+ movge r3, r11
+ str r7, [sp, #36] @ 4-byte Spill
+ ldr r7, [sp, #4] @ 4-byte Reload
+ ldr r6, [sp, #24] @ 4-byte Reload
+ str r1, [r0]
+ ldr r1, [sp] @ 4-byte Reload
+ str r3, [r0, #4]
+ ldr r3, [sp, #8] @ 4-byte Reload
+ movge r12, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ cmp r2, #0
+ str r12, [r0, #8]
+ ldr r12, [sp, #12] @ 4-byte Reload
+ movge lr, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str lr, [r0, #12]
+ ldr lr, [sp, #16] @ 4-byte Reload
+ movge r4, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r4, [r0, #16]
+ ldr r4, [sp, #32] @ 4-byte Reload
+ movge r5, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ cmp r2, #0
+ str r5, [r0, #20]
+ ldr r5, [sp, #28] @ 4-byte Reload
+ movge r9, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r9, [r0, #24]
+ movge r10, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r10, [r0, #28]
+ movge r12, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ cmp r2, #0
+ str r12, [r0, #32]
+ movge lr, r1
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str lr, [r0, #36]
+ movge r7, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r7, [r0, #40]
+ movge r6, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ cmp r2, #0
+ str r6, [r0, #44]
+ movge r5, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r5, [r0, #48]
+ movge r3, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r3, [r0, #52]
+ movge r4, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ cmp r2, #0
+ movge r1, r2
+ str r4, [r0, #56]
+ str r1, [r0, #60]
+ add sp, sp, #92
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end251:
+ .size mcl_fp_subNF16L, .Lfunc_end251-mcl_fp_subNF16L
+ .cantunwind
+ .fnend
+
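+@ Note (non-normative): mcl_fpDbl_add16L (r0=dst, r1=x, r2=y, r3=modulus) adds
+@ two 1024-bit (32-limb) double-width values. The low 16 limbs are stored as-is;
+@ the high 16 limbs are conditionally reduced by the 512-bit modulus based on
+@ the final borrow of the trial subtraction.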
+ .globl mcl_fpDbl_add16L
+ .align 2
+ .type mcl_fpDbl_add16L,%function
+mcl_fpDbl_add16L: @ @mcl_fpDbl_add16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #200
+ sub sp, sp, #200
+ ldm r1, {r7, r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r9}
+ add r10, r1, #32
+ adds r4, r4, r7
+ str r4, [sp, #100] @ 4-byte Spill
+ ldr r4, [r2, #96]
+ str r4, [sp, #164] @ 4-byte Spill
+ ldr r4, [r2, #100]
+ str r4, [sp, #160] @ 4-byte Spill
+ ldr r4, [r2, #104]
+ str r4, [sp, #156] @ 4-byte Spill
+ ldr r4, [r2, #108]
+ str r4, [sp, #180] @ 4-byte Spill
+ ldr r4, [r2, #112]
+ str r4, [sp, #184] @ 4-byte Spill
+ ldr r4, [r2, #116]
+ str r4, [sp, #188] @ 4-byte Spill
+ ldr r4, [r2, #120]
+ str r4, [sp, #192] @ 4-byte Spill
+ ldr r4, [r2, #124]
+ str r4, [sp, #196] @ 4-byte Spill
+ adcs r4, r5, r8
+ adcs r7, r6, lr
+ str r4, [sp, #68] @ 4-byte Spill
+ add lr, r1, #16
+ str r7, [sp, #64] @ 4-byte Spill
+ adcs r7, r9, r12
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #140] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #168] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #152] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #172] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #176] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r2, #16]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #96]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #104]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #108]
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [r1, #112]
+ str r2, [sp, #120] @ 4-byte Spill
+ ldr r2, [r1, #116]
+ str r2, [sp, #124] @ 4-byte Spill
+ ldr r2, [r1, #120]
+ str r2, [sp, #128] @ 4-byte Spill
+ ldr r2, [r1, #124]
+ str r2, [sp, #132] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r8, r9, r10}
+ ldr r2, [r1, #56]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #100] @ 4-byte Reload
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #64] @ 4-byte Reload
+ str r7, [r0, #8]
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r1, r7, r1
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r2, r7, r2
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ adcs r1, r1, r12
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r2, r2, lr
+ str r2, [r0, #28]
+ adcs r1, r1, r4
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r2, r2, r5
+ str r2, [r0, #36]
+ adcs r1, r1, r6
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r1, [r0, #40]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r2, r2, r8
+ str r2, [r0, #44]
+ adcs r1, r1, r9
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str r1, [r0, #48]
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r2, r2, r10
+ adcs r1, r1, r7
+ str r2, [r0, #52]
+ ldr r2, [sp, #96] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #56]
+ ldr r1, [sp, #136] @ 4-byte Reload
+ adcs r2, r2, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r2, [r0, #60]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ adcs r12, r1, r7
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r12, [sp, #92] @ 4-byte Spill
+ adcs r9, r1, r2
+ ldr r1, [sp, #144] @ 4-byte Reload
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r9, [sp, #96] @ 4-byte Spill
+ adcs r8, r1, r2
+ ldr r1, [sp, #148] @ 4-byte Reload
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r8, [sp, #100] @ 4-byte Spill
+ adcs r4, r1, r2
+ ldr r1, [sp, #168] @ 4-byte Reload
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r4, [sp, #136] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #168] @ 4-byte Spill
+ ldr r1, [sp, #152] @ 4-byte Reload
+ adcs r10, r1, r2
+ ldr r1, [sp, #172] @ 4-byte Reload
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r10, [sp, #88] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #172] @ 4-byte Spill
+ ldr r1, [sp, #176] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str r1, [sp, #176] @ 4-byte Spill
+ ldr r1, [sp, #164] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #164] @ 4-byte Spill
+ ldr r1, [sp, #160] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ str r1, [sp, #160] @ 4-byte Spill
+ ldr r1, [sp, #156] @ 4-byte Reload
+ adcs r11, r1, r2
+ ldr r1, [sp, #180] @ 4-byte Reload
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r11, [sp, #140] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str r1, [sp, #180] @ 4-byte Spill
+ ldr r1, [sp, #184] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ str r1, [sp, #184] @ 4-byte Spill
+ ldr r1, [sp, #188] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #128] @ 4-byte Reload
+ str r1, [sp, #188] @ 4-byte Spill
+ ldr r1, [sp, #192] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #132] @ 4-byte Reload
+ str r1, [sp, #192] @ 4-byte Spill
+ ldr r1, [sp, #196] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #196] @ 4-byte Spill
+ mov r1, #0
+ adc r1, r1, #0
+ str r1, [sp, #128] @ 4-byte Spill
+ ldm r3, {r2, r7}
+ ldr r1, [r3, #36]
+ ldr r6, [r3, #8]
+ ldr r5, [r3, #12]
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ subs r12, r12, r2
+ ldr r2, [sp, #168] @ 4-byte Reload
+ sbcs lr, r9, r7
+ sbcs r7, r8, r6
+ ldr r8, [r3, #32]
+ ldr r6, [r3, #24]
+ sbcs r9, r4, r5
+ ldr r5, [r3, #28]
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #144] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #148] @ 4-byte Spill
+ ldr r1, [r3, #56]
+ str r1, [sp, #152] @ 4-byte Spill
+ ldr r1, [r3, #60]
+ str r1, [sp, #156] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ ldr r3, [r3, #16]
+ sbcs r2, r2, r3
+ sbcs r3, r10, r1
+ ldr r1, [sp, #172] @ 4-byte Reload
+ sbcs r4, r1, r6
+ ldr r1, [sp, #176] @ 4-byte Reload
+ ldr r6, [sp, #120] @ 4-byte Reload
+ sbcs r5, r1, r5
+ ldr r1, [sp, #164] @ 4-byte Reload
+ sbcs r8, r1, r8
+ ldr r1, [sp, #160] @ 4-byte Reload
+ sbcs r10, r1, r6
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #132] @ 4-byte Reload
+ sbcs r11, r11, r1
+ ldr r1, [sp, #180] @ 4-byte Reload
+ sbcs r1, r1, r6
+ ldr r6, [sp, #144] @ 4-byte Reload
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [sp, #184] @ 4-byte Reload
+ sbcs r1, r1, r6
+ ldr r6, [sp, #148] @ 4-byte Reload
+ str r1, [sp, #144] @ 4-byte Spill
+ ldr r1, [sp, #188] @ 4-byte Reload
+ sbcs r1, r1, r6
+ ldr r6, [sp, #152] @ 4-byte Reload
+ str r1, [sp, #148] @ 4-byte Spill
+ ldr r1, [sp, #192] @ 4-byte Reload
+ sbcs r1, r1, r6
+ ldr r6, [sp, #156] @ 4-byte Reload
+ str r1, [sp, #152] @ 4-byte Spill
+ ldr r1, [sp, #196] @ 4-byte Reload
+ sbcs r1, r1, r6
+ ldr r6, [sp, #92] @ 4-byte Reload
+ str r1, [sp, #156] @ 4-byte Spill
+ ldr r1, [sp, #128] @ 4-byte Reload
+ sbc r1, r1, #0
+ ands r1, r1, #1
+ movne r12, r6
+ ldr r6, [sp, #96] @ 4-byte Reload
+ str r12, [r0, #64]
+ movne lr, r6
+ ldr r6, [sp, #100] @ 4-byte Reload
+ str lr, [r0, #68]
+ movne r7, r6
+ cmp r1, #0
+ str r7, [r0, #72]
+ ldr r7, [sp, #136] @ 4-byte Reload
+ movne r9, r7
+ ldr r7, [sp, #168] @ 4-byte Reload
+ str r9, [r0, #76]
+ movne r2, r7
+ str r2, [r0, #80]
+ ldr r2, [sp, #88] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #172] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #84]
+ ldr r3, [sp, #132] @ 4-byte Reload
+ movne r4, r2
+ ldr r2, [sp, #176] @ 4-byte Reload
+ str r4, [r0, #88]
+ movne r5, r2
+ ldr r2, [sp, #164] @ 4-byte Reload
+ str r5, [r0, #92]
+ movne r8, r2
+ ldr r2, [sp, #160] @ 4-byte Reload
+ cmp r1, #0
+ str r8, [r0, #96]
+ movne r10, r2
+ ldr r2, [sp, #140] @ 4-byte Reload
+ str r10, [r0, #100]
+ movne r11, r2
+ ldr r2, [sp, #180] @ 4-byte Reload
+ str r11, [r0, #104]
+ movne r3, r2
+ ldr r2, [sp, #184] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #108]
+ ldr r3, [sp, #144] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #188] @ 4-byte Reload
+ str r3, [r0, #112]
+ ldr r3, [sp, #148] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #192] @ 4-byte Reload
+ str r3, [r0, #116]
+ ldr r3, [sp, #152] @ 4-byte Reload
+ movne r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #196] @ 4-byte Reload
+ ldr r2, [sp, #156] @ 4-byte Reload
+ str r3, [r0, #120]
+ movne r2, r1
+ str r2, [r0, #124]
+ add sp, sp, #200
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end252:
+ .size mcl_fpDbl_add16L, .Lfunc_end252-mcl_fpDbl_add16L
+ .cantunwind
+ .fnend
+
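+@ Note (non-normative): mcl_fpDbl_sub16L (r0=dst, r1=x, r2=y, r3=modulus)
+@ subtracts two 1024-bit (32-limb) double-width values. The low 16 limbs are
+@ stored directly; the modulus is conditionally added back into the high 16
+@ limbs when the subtraction borrows.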
+ .globl mcl_fpDbl_sub16L
+ .align 2
+ .type mcl_fpDbl_sub16L,%function
+mcl_fpDbl_sub16L: @ @mcl_fpDbl_sub16L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #200
+ sub sp, sp, #200
+ ldr r7, [r2, #96]
+ ldr r9, [r2]
+ add r10, r1, #32
+ str r7, [sp, #168] @ 4-byte Spill
+ ldr r7, [r2, #100]
+ str r7, [sp, #172] @ 4-byte Spill
+ ldr r7, [r2, #104]
+ str r7, [sp, #176] @ 4-byte Spill
+ ldr r7, [r2, #108]
+ str r7, [sp, #180] @ 4-byte Spill
+ ldr r7, [r2, #112]
+ str r7, [sp, #184] @ 4-byte Spill
+ ldr r7, [r2, #116]
+ str r7, [sp, #188] @ 4-byte Spill
+ ldr r7, [r2, #120]
+ str r7, [sp, #192] @ 4-byte Spill
+ ldr r7, [r2, #124]
+ str r7, [sp, #196] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #140] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #152] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #156] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #160] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #164] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #132] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #128] @ 4-byte Spill
+ ldmib r2, {r8, r12, lr}
+ ldm r1, {r4, r5, r6, r7}
+ subs r4, r4, r9
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r2, #52]
+ str r4, [sp, #92] @ 4-byte Spill
+ sbcs r4, r5, r8
+ sbcs r6, r6, r12
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [r2, #48]
+ sbcs r7, r7, lr
+ str r6, [sp, #24] @ 4-byte Spill
+ ldr r6, [r2, #44]
+ add lr, r1, #16
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r4, [sp, #88] @ 4-byte Spill
+ str r6, [sp, #84] @ 4-byte Spill
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r2, #16]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #96]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #96] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r2, [sp, #100] @ 4-byte Spill
+ ldr r2, [r1, #104]
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r1, #108]
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #112]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #116]
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [r1, #120]
+ str r2, [sp, #120] @ 4-byte Spill
+ ldr r2, [r1, #124]
+ str r2, [sp, #124] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #72] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #76] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r8, r9, r10}
+ ldr r2, [r1, #56]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #24] @ 4-byte Reload
+ add r11, r3, #12
+ str r7, [r0, #8]
+ ldr r7, [sp, #8] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #20] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ sbcs r2, r2, r7
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ sbcs r1, r12, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r2, lr, r2
+ str r2, [r0, #28]
+ sbcs r1, r4, r1
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbcs r2, r5, r2
+ ldr r5, [sp, #72] @ 4-byte Reload
+ str r2, [r0, #36]
+ sbcs r1, r6, r1
+ ldr r2, [sp, #84] @ 4-byte Reload
+ str r1, [r0, #40]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ sbcs r2, r8, r2
+ str r2, [r0, #44]
+ sbcs r1, r9, r1
+ ldr r2, [sp, #92] @ 4-byte Reload
+ str r1, [r0, #48]
+ ldr r1, [sp, #128] @ 4-byte Reload
+ sbcs r2, r10, r2
+ sbcs r1, r7, r1
+ str r2, [r0, #52]
+ ldr r2, [sp, #132] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #56]
+ ldr r1, [sp, #136] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r2, [r0, #60]
+ ldr r2, [sp, #52] @ 4-byte Reload
+ sbcs r9, r7, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r7, [sp, #96] @ 4-byte Reload
+ str r9, [sp, #80] @ 4-byte Spill
+ sbcs r1, r2, r1
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [sp, #144] @ 4-byte Reload
+ sbcs r12, r2, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r12, [sp, #84] @ 4-byte Spill
+ sbcs lr, r2, r1
+ ldr r1, [sp, #152] @ 4-byte Reload
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str lr, [sp, #88] @ 4-byte Spill
+ sbcs r4, r2, r1
+ ldr r1, [sp, #156] @ 4-byte Reload
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r4, [sp, #92] @ 4-byte Spill
+ sbcs r1, r2, r1
+ ldr r2, [sp, #160] @ 4-byte Reload
+ str r1, [sp, #156] @ 4-byte Spill
+ mov r1, #0
+ sbcs r2, r5, r2
+ ldr r5, [sp, #76] @ 4-byte Reload
+ str r2, [sp, #160] @ 4-byte Spill
+ ldr r2, [sp, #164] @ 4-byte Reload
+ sbcs r2, r5, r2
+ str r2, [sp, #164] @ 4-byte Spill
+ ldr r2, [sp, #168] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #100] @ 4-byte Reload
+ str r2, [sp, #168] @ 4-byte Spill
+ ldr r2, [sp, #172] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #104] @ 4-byte Reload
+ str r2, [sp, #172] @ 4-byte Spill
+ ldr r2, [sp, #176] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r2, [sp, #176] @ 4-byte Spill
+ ldr r2, [sp, #180] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r2, [sp, #180] @ 4-byte Spill
+ ldr r2, [sp, #184] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #116] @ 4-byte Reload
+ str r2, [sp, #184] @ 4-byte Spill
+ ldr r2, [sp, #188] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #120] @ 4-byte Reload
+ str r2, [sp, #188] @ 4-byte Spill
+ ldr r2, [sp, #192] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #124] @ 4-byte Reload
+ str r2, [sp, #192] @ 4-byte Spill
+ ldr r2, [sp, #196] @ 4-byte Reload
+ sbcs r2, r7, r2
+ sbc r1, r1, #0
+ str r2, [sp, #196] @ 4-byte Spill
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [r3, #32]
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #136] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #140] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #144] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #148] @ 4-byte Spill
+ ldr r1, [r3, #56]
+ str r1, [sp, #152] @ 4-byte Spill
+ ldr r1, [r3, #60]
+ str r1, [sp, #124] @ 4-byte Spill
+ ldm r3, {r2, r6, r7}
+ ldm r11, {r5, r8, r11}
+ ldr r1, [r3, #28]
+ ldr r10, [r3, #24]
+ str r1, [sp, #112] @ 4-byte Spill
+ adds r1, r9, r2
+ ldr r9, [sp, #132] @ 4-byte Reload
+ adcs r2, r9, r6
+ ldr r6, [sp, #164] @ 4-byte Reload
+ adcs r3, r12, r7
+ ldr r7, [sp, #112] @ 4-byte Reload
+ adcs r12, lr, r5
+ ldr r5, [sp, #160] @ 4-byte Reload
+ adcs lr, r4, r8
+ ldr r4, [sp, #156] @ 4-byte Reload
+ adcs r4, r4, r11
+ adcs r5, r5, r10
+ adcs r8, r6, r7
+ ldr r7, [sp, #168] @ 4-byte Reload
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r11, r7, r6
+ ldr r7, [sp, #172] @ 4-byte Reload
+ ldr r6, [sp, #120] @ 4-byte Reload
+ adcs r6, r7, r6
+ ldr r7, [sp, #176] @ 4-byte Reload
+ str r6, [sp, #120] @ 4-byte Spill
+ ldr r6, [sp, #136] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #140] @ 4-byte Reload
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [sp, #180] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #144] @ 4-byte Reload
+ str r7, [sp, #140] @ 4-byte Spill
+ ldr r7, [sp, #184] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #148] @ 4-byte Reload
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [sp, #188] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #152] @ 4-byte Reload
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [sp, #192] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #124] @ 4-byte Reload
+ str r7, [sp, #152] @ 4-byte Spill
+ ldr r7, [sp, #196] @ 4-byte Reload
+ adc r7, r7, r6
+ ldr r6, [sp, #80] @ 4-byte Reload
+ str r7, [sp, #124] @ 4-byte Spill
+ ldr r7, [sp, #128] @ 4-byte Reload
+ ands r10, r7, #1
+ moveq r1, r6
+ moveq r2, r9
+ str r1, [r0, #64]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r2, [r0, #68]
+ ldr r2, [sp, #120] @ 4-byte Reload
+ moveq r3, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ cmp r10, #0
+ str r3, [r0, #72]
+ moveq r12, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r12, [r0, #76]
+ moveq lr, r1
+ ldr r1, [sp, #156] @ 4-byte Reload
+ str lr, [r0, #80]
+ moveq r4, r1
+ ldr r1, [sp, #160] @ 4-byte Reload
+ cmp r10, #0
+ str r4, [r0, #84]
+ moveq r5, r1
+ ldr r1, [sp, #164] @ 4-byte Reload
+ str r5, [r0, #88]
+ moveq r8, r1
+ ldr r1, [sp, #168] @ 4-byte Reload
+ str r8, [r0, #92]
+ moveq r11, r1
+ ldr r1, [sp, #172] @ 4-byte Reload
+ cmp r10, #0
+ str r11, [r0, #96]
+ moveq r2, r1
+ ldr r1, [sp, #176] @ 4-byte Reload
+ str r2, [r0, #100]
+ ldr r2, [sp, #136] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #180] @ 4-byte Reload
+ str r2, [r0, #104]
+ ldr r2, [sp, #140] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #184] @ 4-byte Reload
+ cmp r10, #0
+ str r2, [r0, #108]
+ ldr r2, [sp, #144] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #188] @ 4-byte Reload
+ str r2, [r0, #112]
+ ldr r2, [sp, #148] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #192] @ 4-byte Reload
+ str r2, [r0, #116]
+ ldr r2, [sp, #152] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #196] @ 4-byte Reload
+ cmp r10, #0
+ str r2, [r0, #120]
+ ldr r2, [sp, #124] @ 4-byte Reload
+ moveq r2, r1
+ str r2, [r0, #124]
+ add sp, sp, #200
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end253:
+ .size mcl_fpDbl_sub16L, .Lfunc_end253-mcl_fpDbl_sub16L
+ .cantunwind
+ .fnend
+
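+@ Note (non-normative): .LmulPv544x32 (r0=dst, r1=src, r2=scalar) multiplies a
+@ 544-bit (17-limb) operand by a single 32-bit word through a umull/adcs carry
+@ chain, producing an 18-limb result at r0.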
+ .align 2
+ .type .LmulPv544x32,%function
+.LmulPv544x32: @ @mulPv544x32
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r11, lr}
+ ldr r12, [r1]
+ ldmib r1, {r3, lr}
+ ldr r9, [r1, #12]
+ umull r4, r8, lr, r2
+ umull lr, r6, r12, r2
+ mov r5, r4
+ mov r7, r6
+ str lr, [r0]
+ umull lr, r12, r9, r2
+ umlal r7, r5, r3, r2
+ str r5, [r0, #8]
+ str r7, [r0, #4]
+ umull r5, r7, r3, r2
+ adds r3, r6, r5
+ adcs r3, r7, r4
+ adcs r3, r8, lr
+ str r3, [r0, #12]
+ ldr r3, [r1, #16]
+ umull r7, r6, r3, r2
+ adcs r3, r12, r7
+ str r3, [r0, #16]
+ ldr r3, [r1, #20]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #20]
+ ldr r3, [r1, #24]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #24]
+ ldr r3, [r1, #28]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #28]
+ ldr r3, [r1, #32]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #32]
+ ldr r3, [r1, #36]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #36]
+ ldr r3, [r1, #40]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #40]
+ ldr r3, [r1, #44]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #44]
+ ldr r3, [r1, #48]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #48]
+ ldr r3, [r1, #52]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #52]
+ ldr r3, [r1, #56]
+ umull r7, r6, r3, r2
+ adcs r3, r5, r7
+ str r3, [r0, #56]
+ ldr r3, [r1, #60]
+ umull r7, r5, r3, r2
+ adcs r3, r6, r7
+ str r3, [r0, #60]
+ ldr r1, [r1, #64]
+ umull r3, r7, r1, r2
+ adcs r1, r5, r3
+ adc r2, r7, #0
+ str r1, [r0, #64]
+ str r2, [r0, #68]
+ pop {r4, r5, r6, r7, r8, r9, r11, lr}
+ mov pc, lr
+.Lfunc_end254:
+ .size .LmulPv544x32, .Lfunc_end254-.LmulPv544x32
+ .cantunwind
+ .fnend
+
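+@ Note (non-normative): mcl_fp_mulUnitPre17L (r0=dst, r1=x, r2=y) is a 17-limb
+@ by 1-limb multiply: it calls .LmulPv544x32 into a stack buffer and copies the
+@ 18-limb product to the destination.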
+ .globl mcl_fp_mulUnitPre17L
+ .align 2
+ .type mcl_fp_mulUnitPre17L,%function
+mcl_fp_mulUnitPre17L: @ @mcl_fp_mulUnitPre17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #100
+ sub sp, sp, #100
+ mov r4, r0
+ add r0, sp, #24
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #92]
+ add r11, sp, #48
+ add lr, sp, #24
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #88]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #84]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #80]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #76]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldm r11, {r5, r6, r7, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ stm r4, {r0, r1, r2, r3, r12, lr}
+ add r0, r4, #24
+ stm r0, {r5, r6, r7, r8, r9, r10, r11}
+ ldr r0, [sp, #4] @ 4-byte Reload
+ str r0, [r4, #52]
+ ldr r0, [sp, #8] @ 4-byte Reload
+ str r0, [r4, #56]
+ ldr r0, [sp, #12] @ 4-byte Reload
+ str r0, [r4, #60]
+ ldr r0, [sp, #16] @ 4-byte Reload
+ str r0, [r4, #64]
+ ldr r0, [sp, #20] @ 4-byte Reload
+ str r0, [r4, #68]
+ add sp, sp, #100
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end255:
+ .size mcl_fp_mulUnitPre17L, .Lfunc_end255-mcl_fp_mulUnitPre17L
+ .cantunwind
+ .fnend
+
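+@ Note (non-normative): mcl_fpDbl_mulPre17L (r0=dst, r1=x, r2=y) is a schoolbook
+@ 17-limb x 17-limb multiplication producing a 34-limb product: one
+@ .LmulPv544x32 call per limb of y, with the partial products accumulated via
+@ the spill slots between calls.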
+ .globl mcl_fpDbl_mulPre17L
+ .align 2
+ .type mcl_fpDbl_mulPre17L,%function
+mcl_fpDbl_mulPre17L: @ @mcl_fpDbl_mulPre17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #340
+ sub sp, sp, #340
+ .pad #1024
+ sub sp, sp, #1024
+ mov r9, r2
+ add r6, sp, #1024
+ mov r4, r0
+ str r1, [sp, #128] @ 4-byte Spill
+ mov r5, r1
+ ldr r2, [r9]
+ add r0, r6, #264
+ str r9, [sp, #124] @ 4-byte Spill
+ str r4, [sp, #132] @ 4-byte Spill
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1356]
+ ldr r1, [sp, #1292]
+ ldr r2, [r9, #4]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #1352]
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #1296]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #1348]
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #1300]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #1344]
+ str r1, [sp, #44] @ 4-byte Spill
+ mov r1, r5
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #1336]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #1332]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #1328]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #1324]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #1320]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1316]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1312]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #1308]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #1304]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #1288]
+ str r0, [r4]
+ add r0, sp, #1216
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1284]
+ add lr, sp, #1216
+ ldr r10, [sp, #1256]
+ ldr r8, [sp, #1252]
+ ldr r7, [sp, #1248]
+ ldr r6, [sp, #1244]
+ ldr r5, [sp, #1240]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1276]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1272]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1268]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1260]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #56] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #4]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r9, #8]
+ add r9, sp, #1024
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, r9, #120
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1212]
+ ldr r9, [sp, #56] @ 4-byte Reload
+ ldr r8, [sp, #1184]
+ ldr r7, [sp, #1180]
+ ldr r11, [sp, #1176]
+ ldr r5, [sp, #1172]
+ ldr lr, [sp, #1168]
+ ldr r10, [sp, #1164]
+ ldr r12, [sp, #1160]
+ ldr r1, [sp, #1148]
+ ldr r2, [sp, #1152]
+ ldr r3, [sp, #1156]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1208]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1204]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1200]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1188]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ adds r0, r0, r9
+ str r0, [r4, #8]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r2, [r5, #12]
+ adcs r0, r11, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #1072
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1140]
+ add lr, sp, #1072
+ ldr r10, [sp, #1112]
+ ldr r9, [sp, #1108]
+ ldr r8, [sp, #1104]
+ ldr r7, [sp, #1100]
+ ldr r6, [sp, #1096]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1132]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1128]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1124]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1120]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1116]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #56] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #12]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r4, [sp, #128] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #16]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #1000
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1068]
+ add r11, sp, #1024
+ add lr, sp, #1000
+ ldr r6, [sp, #1040]
+ ldr r5, [sp, #1036]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1064]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1060]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #56] @ 4-byte Reload
+ ldr r8, [sp, #132] @ 4-byte Reload
+ adds r0, r0, r7
+ str r0, [r8, #16]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r2, [r5, #20]
+ adcs r0, r6, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #928
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #996]
+ add r11, sp, #952
+ add lr, sp, #928
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #992]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #988]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #984]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #980]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #976]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r11, {r6, r7, r8, r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r4, [sp, #56] @ 4-byte Reload
+ adds r0, r0, r4
+ ldr r4, [sp, #132] @ 4-byte Reload
+ str r0, [r4, #20]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r5, #24]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #856
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #924]
+ add r11, sp, #880
+ add lr, sp, #856
+ ldr r7, [sp, #896]
+ ldr r5, [sp, #892]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #920]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #916]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #912]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #908]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #904]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #900]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r8, [sp, #56] @ 4-byte Reload
+ adds r0, r0, r8
+ str r0, [r4, #24]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r4, [sp, #124] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r4, #28]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r5, r0
+ mov r5, r6
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #784
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #852]
+ add r10, sp, #808
+ add lr, sp, #784
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #848]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #844]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #840]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #836]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #832]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #828]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #824]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r6, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r7, [sp, #56] @ 4-byte Reload
+ ldr r11, [sp, #132] @ 4-byte Reload
+ adds r0, r0, r7
+ str r0, [r11, #28]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r4, #32]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #712
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #780]
+ add r8, sp, #748
+ add r11, sp, #736
+ add lr, sp, #712
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #776]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #772]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #768]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #764]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r8, {r4, r6, r7, r8}
+ ldm r11, {r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r5, [sp, #56] @ 4-byte Reload
+ adds r0, r0, r5
+ ldr r5, [sp, #132] @ 4-byte Reload
+ str r0, [r5, #32]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r2, [r6, #36]
+ adcs r0, r7, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #640
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #708]
+ add r10, sp, #664
+ add lr, sp, #640
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #688]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #684]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #680]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #56] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r5, #36]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r6, #40]
+ mov r6, r4
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #568
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #636]
+ add r11, sp, #592
+ add lr, sp, #568
+ ldr r7, [sp, #608]
+ ldr r4, [sp, #604]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #632]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #628]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #624]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #620]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #616]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #612]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r8, [sp, #56] @ 4-byte Reload
+ adds r0, r0, r8
+ str r0, [r5, #40]
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ ldr r2, [r4, #44]
+ adcs r0, r7, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #496
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #564]
+ add r10, sp, #520
+ add lr, sp, #496
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #560]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #552]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #548]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #544]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #540]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #48] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r5, #44]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r5, [sp, #128] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r4, #48]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r8, r0
+ mov r8, r4
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #424
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #492]
+ add lr, sp, #428
+ ldr r9, [sp, #460]
+ ldr r7, [sp, #456]
+ ldr r11, [sp, #452]
+ ldr r10, [sp, #448]
+ ldr r3, [sp, #424]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #488]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #484]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #480]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #476]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #472]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #468]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #464]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r12, lr}
+ ldr r6, [sp, #48] @ 4-byte Reload
+ ldr r4, [sp, #120] @ 4-byte Reload
+ adds r3, r3, r6
+ ldr r6, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ str r3, [r6, #48]
+ ldr r3, [r8, #52]
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r2, r0
+ mov r2, r3
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r5
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #352
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #420]
+ add r11, sp, #380
+ add r12, sp, #356
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #404]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #400]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #396]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r4, r9, r10, r11}
+ ldr r5, [sp, #376]
+ ldr lr, [sp, #352]
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r7, [sp, #52] @ 4-byte Reload
+ adds r7, lr, r7
+ ldr lr, [r8, #56]
+ str r7, [r6, #52]
+ ldr r6, [sp, #120] @ 4-byte Reload
+ add r7, sp, #280
+ adcs r0, r0, r6
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r2, r0
+ mov r2, lr
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, r7
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #348]
+ add r8, sp, #316
+ add r11, sp, #304
+ add lr, sp, #280
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #344]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #340]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #336]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #332]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #328]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r8, {r6, r7, r8}
+ ldm r11, {r9, r10, r11}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r5, [sp, #52] @ 4-byte Reload
+ adds r0, r0, r5
+ ldr r5, [sp, #132] @ 4-byte Reload
+ str r0, [r5, #56]
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r2, [r8, #60]
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #208
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #276]
+ add lr, sp, #228
+ add r12, sp, #212
+ ldr r6, [sp, #248]
+ ldr r9, [sp, #244]
+ ldr r4, [sp, #240]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #268]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #264]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #256]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #252]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm lr, {r10, r11, lr}
+ ldr r3, [sp, #208]
+ ldm r12, {r0, r1, r2, r12}
+ ldr r7, [sp, #88] @ 4-byte Reload
+ adds r3, r3, r7
+ str r3, [r5, #60]
+ ldr r5, [sp, #120] @ 4-byte Reload
+ ldr r3, [r8, #64]
+ adcs r8, r0, r5
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r5, r1, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r0, r2, r0
+ mov r2, r3
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ add r0, sp, #136
+ bl .LmulPv544x32(PLT)
+ add r3, sp, #136
+ add r11, sp, #172
+ add lr, sp, #152
+ ldm r3, {r0, r1, r2, r3}
+ adds r7, r0, r8
+ ldr r0, [sp, #12] @ 4-byte Reload
+ adcs r6, r1, r5
+ adcs r5, r2, r0
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r4, r3, r0
+ ldr r0, [sp, #204]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #200]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #196]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldm lr, {r0, r2, r3, r12, lr}
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r7, [r1, #64]
+ str r6, [r1, #68]
+ str r5, [r1, #72]
+ ldr r5, [sp, #44] @ 4-byte Reload
+ str r4, [r1, #76]
+ ldr r4, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [r1, #80]
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r2, r2, r4
+ str r2, [r1, #84]
+ ldr r2, [sp, #88] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [r1, #88]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r2, r12, r2
+ str r2, [r1, #92]
+ ldr r2, [sp, #96] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [r1, #96]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r2, r8, r2
+ str r2, [r1, #100]
+ ldr r2, [sp, #104] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [r1, #104]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r2, r10, r2
+ str r2, [r1, #108]
+ ldr r2, [sp, #72] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [r1, #112]
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #76] @ 4-byte Reload
+ str r0, [r1, #116]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r0, [r1, #120]
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #84] @ 4-byte Reload
+ str r0, [r1, #124]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #128] @ 4-byte Reload
+ str r0, [r1, #128]
+ adc r2, r2, #0
+ str r2, [r1, #132]
+ add sp, sp, #340
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end256:
+ .size mcl_fpDbl_mulPre17L, .Lfunc_end256-mcl_fpDbl_mulPre17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sqrPre17L
+ .align 2
+ .type mcl_fpDbl_sqrPre17L,%function
+mcl_fpDbl_sqrPre17L: @ @mcl_fpDbl_sqrPre17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #332
+ sub sp, sp, #332
+ .pad #1024
+ sub sp, sp, #1024
+ mov r7, r1
+ mov r4, r0
+ add r0, sp, #1280
+ ldr r2, [r7]
+ str r7, [sp, #120] @ 4-byte Spill
+ str r4, [sp, #124] @ 4-byte Spill
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1348]
+ ldr r1, [sp, #1284]
+ ldr r2, [r7, #4]
+ add r11, sp, #1024
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #1344]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #1288]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #1292]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #1336]
+ str r1, [sp, #40] @ 4-byte Spill
+ mov r1, r7
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #1332]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #1328]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #1324]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #1320]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #1316]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #1312]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #1308]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1304]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #1300]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #1296]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r0, [r4]
+ add r0, r11, #184
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1276]
+ add r10, sp, #1232
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1272]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1268]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1264]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1260]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1256]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1252]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r5, r6, r8, r9, r10}
+ ldr r0, [sp, #1208]
+ ldr r11, [sp, #52] @ 4-byte Reload
+ ldr lr, [sp, #1228]
+ ldr r12, [sp, #1224]
+ ldr r1, [sp, #1212]
+ ldr r2, [sp, #1216]
+ ldr r3, [sp, #1220]
+ adds r0, r0, r11
+ str r0, [r4, #4]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #8]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #1136
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1204]
+ add r12, sp, #1136
+ ldr r6, [sp, #1176]
+ ldr r4, [sp, #1172]
+ ldr lr, [sp, #1168]
+ ldr r11, [sp, #1164]
+ ldr r10, [sp, #1160]
+ ldr r9, [sp, #1156]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1200]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1188]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1184]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1180]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r5, [sp, #52] @ 4-byte Reload
+ ldr r8, [sp, #124] @ 4-byte Reload
+ adds r0, r0, r5
+ str r0, [r8, #8]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #12]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r4, r0
+ add r4, sp, #1024
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, r4, #40
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1132]
+ add r11, sp, #1088
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1128]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1124]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1120]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1116]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1112]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r11, {r5, r6, r8, r9, r10, r11}
+ ldr r0, [sp, #1064]
+ ldr r4, [sp, #52] @ 4-byte Reload
+ ldr lr, [sp, #1084]
+ ldr r12, [sp, #1080]
+ ldr r1, [sp, #1068]
+ ldr r2, [sp, #1072]
+ ldr r3, [sp, #1076]
+ adds r0, r0, r4
+ ldr r4, [sp, #124] @ 4-byte Reload
+ str r0, [r4, #12]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #16]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #992
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1060]
+ add lr, sp, #1012
+ add r12, sp, #992
+ ldr r6, [sp, #1032]
+ ldr r5, [sp, #1028]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm lr, {r9, r10, r11, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r8, [sp, #52] @ 4-byte Reload
+ adds r0, r0, r8
+ str r0, [r4, #16]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #20]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #920
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #988]
+ add r10, sp, #944
+ add lr, sp, #920
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #984]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #980]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #976]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #972]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #968]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #964]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r5, r6, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #52] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #20]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #24]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #848
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #916]
+ add lr, sp, #868
+ add r12, sp, #848
+ ldr r6, [sp, #888]
+ ldr r5, [sp, #884]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #912]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #908]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #904]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #900]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #896]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #892]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm lr, {r9, r10, r11, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r8, [sp, #52] @ 4-byte Reload
+ adds r0, r0, r8
+ str r0, [r4, #24]
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #28]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #776
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #844]
+ add r10, sp, #800
+ add lr, sp, #776
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #840]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #836]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #832]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #828]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #824]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #820]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r5, r6, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #52] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #28]
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #32]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #704
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #772]
+ add lr, sp, #724
+ add r12, sp, #704
+ ldr r6, [sp, #744]
+ ldr r5, [sp, #740]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #768]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #764]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #760]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #756]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #752]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #748]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm lr, {r9, r10, r11, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r8, [sp, #52] @ 4-byte Reload
+ adds r0, r0, r8
+ str r0, [r4, #32]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #36]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #632
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #700]
+ add r10, sp, #656
+ add lr, sp, #632
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #692]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #688]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #684]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #680]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #676]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r5, r6, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #52] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #36]
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [r7, #40]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #560
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #628]
+ add r7, sp, #596
+ add lr, sp, #580
+ add r12, sp, #560
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #624]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #620]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #616]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #612]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #608]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r7, {r5, r6, r7}
+ ldm lr, {r9, r10, r11, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r8, [sp, #52] @ 4-byte Reload
+ adds r0, r0, r8
+ str r0, [r4, #40]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ ldr r2, [r1, #44]
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, sp, #488
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #556]
+ add r10, sp, #512
+ add lr, sp, #488
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #552]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #548]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #544]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #540]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #536]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #44] @ 4-byte Reload
+ adds r0, r0, r11
+ str r0, [r4, #44]
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r2, [r9, #48]
+ adcs r0, r10, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #416
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #484]
+ add r10, sp, #444
+ add lr, sp, #420
+ mov r8, r4
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #480]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #476]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #472]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #468]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #464]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #460]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #456]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldm r10, {r5, r6, r10}
+ ldr r11, [sp, #440]
+ ldr r12, [sp, #416]
+ ldm lr, {r0, r1, r2, r3, lr}
+ ldr r7, [sp, #44] @ 4-byte Reload
+ adds r7, r12, r7
+ str r7, [r4, #48]
+ ldr r7, [sp, #116] @ 4-byte Reload
+ mov r4, r9
+ add r9, sp, #344
+ ldr r12, [r4, #52]
+ adcs r7, r0, r7
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r2, r0
+ mov r2, r12
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r6, r0
+ mov r6, r4
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ mov r0, r9
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #412]
+ add r11, sp, #368
+ add r12, sp, #348
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #404]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #400]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #396]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #392]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #388]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r11, {r4, r5, r9, r10, r11}
+ ldr lr, [sp, #344]
+ ldm r12, {r0, r1, r2, r3, r12}
+ adds r7, lr, r7
+ str r7, [r8, #52]
+ mov r7, r6
+ ldr r6, [sp, #116] @ 4-byte Reload
+ add r8, sp, #272
+ ldr lr, [r7, #56]
+ adcs r0, r0, r6
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r2, r0
+ mov r2, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r7
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ mov r0, r8
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #340]
+ add r8, sp, #308
+ add lr, sp, #292
+ add r12, sp, #272
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #336]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #332]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #328]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #324]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r8, {r5, r6, r7, r8}
+ ldm lr, {r9, r10, r11, lr}
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r4, [sp, #48] @ 4-byte Reload
+ adds r0, r0, r4
+ ldr r4, [sp, #124] @ 4-byte Reload
+ str r0, [r4, #56]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r2, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r12, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r2, [r6, #60]
+ adcs r0, r7, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #200
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #268]
+ add r9, sp, #232
+ add lr, sp, #204
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #264]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #260]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #256]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #252]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #248]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #244]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r9, {r5, r8, r9}
+ ldr r10, [sp, #228]
+ ldr r12, [sp, #200]
+ ldm lr, {r0, r1, r2, r3, r11, lr}
+ ldr r7, [sp, #80] @ 4-byte Reload
+ adds r7, r12, r7
+ ldr r12, [r6, #64]
+ str r7, [r4, #60]
+ ldr r4, [sp, #116] @ 4-byte Reload
+ adcs r7, r0, r4
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r4, r1, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adcs r0, r2, r0
+ mov r2, r12
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r10, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r8, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r1, r0
+ mov r1, r6
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #116] @ 4-byte Spill
+ add r0, sp, #128
+ bl .LmulPv544x32(PLT)
+ add r3, sp, #128
+ add r11, sp, #164
+ add lr, sp, #144
+ ldm r3, {r0, r1, r2, r3}
+ adds r7, r0, r7
+ ldr r0, [sp, #8] @ 4-byte Reload
+ adcs r6, r1, r4
+ adcs r5, r2, r0
+ ldr r0, [sp, #4] @ 4-byte Reload
+ adcs r4, r3, r0
+ ldr r0, [sp, #196]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #184]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #180]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldm lr, {r0, r2, r3, r12, lr}
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r7, [r1, #64]
+ str r6, [r1, #68]
+ str r5, [r1, #72]
+ ldr r5, [sp, #40] @ 4-byte Reload
+ str r4, [r1, #76]
+ ldr r4, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [r1, #80]
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r2, r2, r4
+ str r2, [r1, #84]
+ ldr r2, [sp, #80] @ 4-byte Reload
+ adcs r0, r3, r0
+ str r0, [r1, #88]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r2, r12, r2
+ str r2, [r1, #92]
+ ldr r2, [sp, #88] @ 4-byte Reload
+ adcs r0, lr, r0
+ str r0, [r1, #96]
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r2, r8, r2
+ str r2, [r1, #100]
+ ldr r2, [sp, #96] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [r1, #104]
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r2, r10, r2
+ str r2, [r1, #108]
+ ldr r2, [sp, #64] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [r1, #112]
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r0, [r1, #116]
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #72] @ 4-byte Reload
+ str r0, [r1, #120]
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #76] @ 4-byte Reload
+ str r0, [r1, #124]
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r2, r0
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str r0, [r1, #128]
+ adc r2, r2, #0
+ str r2, [r1, #132]
+ add sp, sp, #332
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end257:
+ .size mcl_fpDbl_sqrPre17L, .Lfunc_end257-mcl_fpDbl_sqrPre17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_mont17L
+ .align 2
+ .type mcl_fp_mont17L,%function
+mcl_fp_mont17L: @ @mcl_fp_mont17L
+ .fnstart
+@ BB#0:
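+@ 17-limb (17 x 32-bit = 544-bit) Montgomery multiplication. From the code
+@ below, r0 apparently receives the result, r1 and r2 the two operands, and
+@ r3 the modulus p. For each 32-bit word of the r2 operand, .LmulPv544x32 is
+@ called to form a partial product, the running total is accumulated with an
+@ adcs chain, and a second .LmulPv544x32 call against p folds in the
+@ Montgomery reduction step for that word.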
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #556
+ sub sp, sp, #556
+ .pad #2048
+ sub sp, sp, #2048
+ add r12, sp, #140
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #96] @ 4-byte Spill
+ add r0, sp, #2528
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #136] @ 4-byte Spill
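+@ First pass: operand * y[0] into the 18-word product buffer at [sp, #2528].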
+ bl .LmulPv544x32(PLT)
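+@ Montgomery factor for this pass: r2 = t[0] * r5, where r5 was loaded from
+@ p[-1] above (assumed to hold -p^-1 mod 2^32); the next .LmulPv544x32 call,
+@ with r1 = p, adds r2*p so the low word of the running total cancels.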
+ ldr r0, [sp, #2528]
+ ldr r1, [sp, #2532]
+ mul r2, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #2596]
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #2536]
+ add r5, sp, #2048
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #2592]
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #2540]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #2588]
+ str r1, [sp, #92] @ 4-byte Spill
+ mov r1, r4
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #2584]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #2580]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #2576]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #2572]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #2568]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #2564]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #2560]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #2556]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #2552]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #2548]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2544]
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, r5, #408
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2524]
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r4, [sp, #2484]
+ ldr r10, [sp, #2480]
+ ldr r6, [sp, #2476]
+ ldr r7, [sp, #2472]
+ ldr r11, [sp, #2456]
+ ldr r9, [sp, #2460]
+ ldr r5, [sp, #2464]
+ ldr r8, [sp, #2468]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #2520]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #2516]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2512]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2508]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2504]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2500]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2496]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2492]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2488]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r2, [r0, #4]
+ add r0, sp, #2384
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #80] @ 4-byte Reload
+ ldr r1, [sp, #76] @ 4-byte Reload
+ ldr r2, [sp, #20] @ 4-byte Reload
+ ldr r3, [sp, #2400]
+ ldr r12, [sp, #2404]
+ ldr lr, [sp, #2408]
+ adds r0, r11, r0
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r9, r0
+ ldr r9, [sp, #2424]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r11, [sp, #104] @ 4-byte Reload
+ adcs r0, r5, r0
+ ldr r5, [sp, #2416]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r8, r0
+ ldr r8, [sp, #2384]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ ldr r7, [sp, #100] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r6, r0
+ ldr r6, [sp, #2420]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r10, r0
+ ldr r10, [sp, #2428]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r4, r0
+ ldr r4, [sp, #2412]
+ adcs r1, r2, r1
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ mov r0, #0
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #120] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #124] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #128] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [sp, #132] @ 4-byte Reload
+ adcs r1, r2, r1
+ ldr r2, [sp, #2396]
+ adc r0, r0, #0
+ adds r8, r11, r8
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #2392]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2452]
+ str r8, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2448]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2444]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2440]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #2436]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #2432]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #2388]
+ adcs r0, r7, r0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #2048
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, r4, #264
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2380]
+ add r10, sp, #2320
+ ldr r7, [sp, #2340]
+ ldr r6, [sp, #2336]
+ ldr r4, [sp, #2312]
+ ldr r11, [sp, #2316]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2376]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2372]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2368]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2364]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2360]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2356]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2352]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2348]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2344]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, sp, #2240
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #2252]
+ ldr r3, [sp, #2256]
+ ldr r12, [sp, #2260]
+ ldr lr, [sp, #2264]
+ adds r0, r0, r4
+ ldr r4, [sp, #2268]
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r11, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #2272]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #2240]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #2280]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #2284]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #2276]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #2248]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r8, r11, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #2308]
+ str r8, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2304]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2300]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2296]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2292]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2288]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2244]
+ adcs r0, r7, r0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #2048
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, r4, #120
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2236]
+ add r10, sp, #2176
+ ldr r7, [sp, #2196]
+ ldr r6, [sp, #2192]
+ ldr r4, [sp, #2168]
+ ldr r11, [sp, #2172]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2232]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2228]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2224]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2220]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2216]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2212]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2208]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2204]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2200]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, sp, #2096
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #2108]
+ ldr r3, [sp, #2112]
+ ldr r12, [sp, #2116]
+ ldr lr, [sp, #2120]
+ adds r0, r0, r4
+ ldr r4, [sp, #2124]
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r11, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #2128]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #2096]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #2136]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #2140]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #2132]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #2104]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r8, r11, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #2164]
+ str r8, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2160]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2156]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2152]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2148]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2144]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2100]
+ adcs r0, r7, r0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, r4, #1000
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2092]
+ add r10, sp, #2032
+ ldr r7, [sp, #2052]
+ ldr r6, [sp, #2048]
+ ldr r4, [sp, #2024]
+ ldr r11, [sp, #2028]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2088]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2084]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2080]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2076]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2072]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2068]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2064]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2060]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2056]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, sp, #1952
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1964]
+ ldr r3, [sp, #1968]
+ ldr r12, [sp, #1972]
+ ldr lr, [sp, #1976]
+ adds r0, r0, r4
+ ldr r4, [sp, #1980]
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r11, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1984]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1952]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1992]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1996]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1988]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1960]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r8, r11, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #2020]
+ str r8, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2016]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2012]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2008]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2004]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2000]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1956]
+ adcs r0, r7, r0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, r4, #856
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1948]
+ add r10, sp, #1888
+ ldr r7, [sp, #1908]
+ ldr r6, [sp, #1904]
+ ldr r4, [sp, #1880]
+ ldr r11, [sp, #1884]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1944]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1940]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1936]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1932]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1928]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1924]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1920]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1916]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1912]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, sp, #1808
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1820]
+ ldr r3, [sp, #1824]
+ ldr r12, [sp, #1828]
+ ldr lr, [sp, #1832]
+ adds r0, r0, r4
+ ldr r4, [sp, #1836]
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r11, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1840]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1808]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1848]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1852]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1844]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1816]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r8, r11, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1876]
+ str r8, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1872]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1868]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1864]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1860]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1856]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1812]
+ adcs r0, r7, r0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, r4, #712
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1804]
+ add r10, sp, #1744
+ ldr r7, [sp, #1764]
+ ldr r6, [sp, #1760]
+ ldr r4, [sp, #1736]
+ ldr r11, [sp, #1740]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1800]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1796]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1792]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1788]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1784]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1780]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1776]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1772]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1768]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, sp, #1664
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1676]
+ ldr r3, [sp, #1680]
+ ldr r12, [sp, #1684]
+ ldr lr, [sp, #1688]
+ adds r0, r0, r4
+ ldr r4, [sp, #1692]
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r11, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1696]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1664]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1704]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1708]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1700]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1672]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r8, r11, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1732]
+ str r8, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1728]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1724]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1720]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1716]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1712]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1668]
+ adcs r0, r7, r0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, r4, #568
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1660]
+ add r10, sp, #1600
+ ldr r7, [sp, #1620]
+ ldr r6, [sp, #1616]
+ ldr r4, [sp, #1592]
+ ldr r11, [sp, #1596]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1656]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1652]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1648]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1644]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1640]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1636]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1632]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1628]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1624]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, sp, #1520
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1532]
+ ldr r3, [sp, #1536]
+ ldr r12, [sp, #1540]
+ ldr lr, [sp, #1544]
+ adds r0, r0, r4
+ ldr r4, [sp, #1548]
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r11, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1552]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1520]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1560]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1564]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1556]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1528]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r8, r11, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1588]
+ str r8, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1584]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1580]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1576]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1572]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1568]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1524]
+ adcs r0, r7, r0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, r4, #424
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1516]
+ add r10, sp, #1456
+ ldr r7, [sp, #1476]
+ ldr r6, [sp, #1472]
+ ldr r4, [sp, #1448]
+ ldr r11, [sp, #1452]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1512]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1508]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1504]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1500]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1496]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1492]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1488]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1484]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1480]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, sp, #1376
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1388]
+ ldr r3, [sp, #1392]
+ ldr r12, [sp, #1396]
+ ldr lr, [sp, #1400]
+ adds r0, r0, r4
+ ldr r4, [sp, #1404]
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r11, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1408]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1376]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1416]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1420]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1412]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1384]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r8, r11, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1444]
+ str r8, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1440]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1436]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1432]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1428]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1424]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1380]
+ adcs r0, r7, r0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r8, r0
+ add r0, r4, #280
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1372]
+ add r10, sp, #1312
+ ldr r7, [sp, #1332]
+ ldr r6, [sp, #1328]
+ ldr r4, [sp, #1304]
+ ldr r11, [sp, #1308]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1368]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1364]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1360]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1356]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1352]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1348]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1344]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1336]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r5, r8, r9, r10}
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, sp, #1232
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ ldr r2, [sp, #1244]
+ ldr r3, [sp, #1248]
+ ldr r12, [sp, #1252]
+ ldr lr, [sp, #1256]
+ adds r0, r0, r4
+ ldr r4, [sp, #1260]
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r11, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #1264]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ ldr r8, [sp, #1232]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #1272]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ ldr r10, [sp, #1276]
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1268]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #1240]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ adds r8, r11, r8
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #1300]
+ str r8, [sp, #36] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1296]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1292]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1288]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1284]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1236]
+ adcs r0, r7, r0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ add r4, sp, #1024
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #136] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ mul r2, r8, r5
+ adcs r0, r0, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, r4, #136
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1228]
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r11, [sp, #1184]
+ ldr r4, [sp, #1180]
+ ldr r6, [sp, #1176]
+ ldr r7, [sp, #1160]
+ ldr r8, [sp, #1164]
+ ldr r9, [sp, #1168]
+ ldr r10, [sp, #1172]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1224]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1220]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1216]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1212]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1208]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1204]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1200]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1188]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, sp, #1088
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #36] @ 4-byte Reload
+ ldr r1, [sp, #128] @ 4-byte Reload
+ ldr r2, [sp, #12] @ 4-byte Reload
+ add lr, sp, #1104
+ adds r0, r0, r7
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #1088
+ adcs r1, r1, r9
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [sp, #124] @ 4-byte Reload
+ adcs r1, r1, r10
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [sp, #120] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #16] @ 4-byte Reload
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #68] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #64] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #60] @ 4-byte Spill
+ ldm r8, {r4, r6, r8}
+ ldr r7, [sp, #1100]
+ ldr r10, [sp, #1140]
+ ldr r9, [sp, #1136]
+ adds r0, r0, r4
+ ldr r4, [sp, #1128]
+ mul r1, r0, r5
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #1156]
+ ldr r5, [sp, #1132]
+ str r1, [sp, #52] @ 4-byte Spill
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1152]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #128] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #128] @ 4-byte Spill
+ ldr r6, [sp, #124] @ 4-byte Reload
+ adcs r6, r6, r8
+ str r6, [sp, #124] @ 4-byte Spill
+ ldr r6, [sp, #120] @ 4-byte Reload
+ adcs r6, r6, r7
+ str r6, [sp, #120] @ 4-byte Spill
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #1016
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1084]
+ add r10, sp, #1016
+ ldr r11, [sp, #1044]
+ ldr r4, [sp, #1040]
+ ldr r5, [sp, #1036]
+ ldr r6, [sp, #1032]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1080]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1076]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1072]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1068]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1064]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1060]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm r10, {r7, r8, r9, r10}
+ ldr r0, [sp, #144] @ 4-byte Reload
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #944
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #132] @ 4-byte Reload
+ ldr r1, [sp, #124] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ add lr, sp, #960
+ adds r0, r0, r7
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #944
+ adcs r1, r1, r9
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [sp, #120] @ 4-byte Reload
+ adcs r1, r1, r10
+ add r10, sp, #984
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r1, r1, r6
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r1, r5
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r1, r4
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #88] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adc r1, r1, #0
+ str r1, [sp, #32] @ 4-byte Spill
+ ldm r8, {r4, r6, r8}
+ ldr r7, [sp, #956]
+ adds r1, r0, r4
+ ldr r0, [sp, #136] @ 4-byte Reload
+ str r1, [sp, #132] @ 4-byte Spill
+ mul r2, r1, r0
+ ldr r0, [sp, #1012]
+ str r2, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1008]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1004]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1000]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r10, {r4, r5, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #128] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #80] @ 4-byte Spill
+ ldr r6, [sp, #124] @ 4-byte Reload
+ adcs r6, r6, r8
+ str r6, [sp, #76] @ 4-byte Spill
+ ldr r6, [sp, #120] @ 4-byte Reload
+ adcs r6, r6, r7
+ str r6, [sp, #72] @ 4-byte Spill
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ add r0, sp, #872
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #940]
+ add r11, sp, #880
+ ldr r5, [sp, #900]
+ ldr r4, [sp, #896]
+ ldr r9, [sp, #872]
+ ldr r10, [sp, #876]
+ add r0, sp, #800
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #936]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #932]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #928]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #924]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #920]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #916]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #912]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #908]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #904]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r11, {r6, r7, r8, r11}
+ ldr r1, [sp, #144] @ 4-byte Reload
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #140] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #132] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #804
+ adds r0, r0, r9
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #828
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #868]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #864]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #860]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #856]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #852]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #848]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r7, [sp, #800]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #132] @ 4-byte Reload
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adds r7, r11, r7
+ adcs r0, r6, r0
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
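+@ Montgomery reduction step (inferred from the code): r7 holds the low word of
+@ the running sum; it is multiplied by the constant kept in [sp, #136]
+@ (presumably -p^-1 mod 2^32, mcl's usual convention) to form r2, and
+@ .LmulPv544x32 is called with the modulus pointer in r1 ([sp, #148]) so that
+@ adding the resulting product clears the low limb of the accumulator.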
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #728
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #796]
+ add r9, sp, #732
+ ldr r5, [sp, #756]
+ ldr r11, [sp, #752]
+ ldr r8, [sp, #748]
+ ldr r10, [sp, #728]
+ add r0, sp, #656
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #792]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #788]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #784]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #780]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #776]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #772]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #768]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #764]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #760]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r9, {r4, r6, r7, r9}
+ ldr r1, [sp, #144] @ 4-byte Reload
+ ldr r2, [r1, #52]
+ ldr r1, [sp, #140] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #660
+ adds r0, r0, r10
+ add r10, sp, #684
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #724]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #720]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #716]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #712]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r7, [sp, #656]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #84] @ 4-byte Reload
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adds r7, r11, r7
+ adcs r0, r6, r0
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #584
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #652]
+ add r9, sp, #588
+ ldr r5, [sp, #612]
+ ldr r11, [sp, #608]
+ ldr r8, [sp, #604]
+ ldr r10, [sp, #584]
+ add r0, sp, #512
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #648]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #644]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #640]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #636]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #632]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #628]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #624]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #620]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #616]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r9, {r4, r6, r7, r9}
+ ldr r1, [sp, #144] @ 4-byte Reload
+ ldr r2, [r1, #56]
+ ldr r1, [sp, #140] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #516
+ adds r0, r0, r10
+ add r10, sp, #540
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #576]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #572]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #568]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #564]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #560]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r7, [sp, #512]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #84] @ 4-byte Reload
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adds r7, r11, r7
+ adcs r0, r6, r0
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #440
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #508]
+ add r9, sp, #444
+ ldr r5, [sp, #468]
+ ldr r11, [sp, #464]
+ ldr r8, [sp, #460]
+ ldr r10, [sp, #440]
+ add r0, sp, #368
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #504]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #500]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #496]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #492]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #488]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #484]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #480]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #476]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #472]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r9, {r4, r6, r7, r9}
+ ldr r1, [sp, #144] @ 4-byte Reload
+ ldr r2, [r1, #60]
+ ldr r1, [sp, #140] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #16] @ 4-byte Reload
+ add lr, sp, #372
+ adds r0, r0, r10
+ add r10, sp, #396
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #436]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #432]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #428]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #424]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #420]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r7, [sp, #368]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #84] @ 4-byte Reload
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adds r7, r11, r7
+ adcs r0, r6, r0
+ str r7, [sp, #32] @ 4-byte Spill
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #296
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #364]
+ add r11, sp, #312
+ add r7, sp, #300
+ ldr r9, [sp, #324]
+ add r0, sp, #224
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [sp, #360]
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #356]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #352]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #348]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #344]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #340]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #336]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #332]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #328]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r11, {r4, r10, r11}
+ ldr r8, [sp, #296]
+ ldm r7, {r5, r6, r7}
+ ldr r1, [sp, #144] @ 4-byte Reload
+ ldr r2, [r1, #64]
+ ldr r1, [sp, #140] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #80] @ 4-byte Reload
+ ldr r2, [sp, #16] @ 4-byte Reload
+ add lr, sp, #240
+ adds r0, r0, r8
+ ldr r8, [sp, #232]
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #268]
+ adcs r1, r1, r6
+ str r1, [sp, #144] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r1, r1, r7
+ ldr r7, [sp, #236]
+ str r1, [sp, #140] @ 4-byte Spill
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r4
+ ldr r4, [sp, #224]
+ str r1, [sp, #84] @ 4-byte Spill
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r10
+ str r1, [sp, #80] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r1, r1, r11
+ ldr r11, [sp, #228]
+ str r1, [sp, #76] @ 4-byte Spill
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [sp, #72] @ 4-byte Spill
+ ldr r1, [sp, #132] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [sp, #128] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r1, [sp, #128] @ 4-byte Spill
+ ldr r1, [sp, #124] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [sp, #120] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [sp, #116] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #116] @ 4-byte Spill
+ ldr r1, [sp, #112] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #112] @ 4-byte Spill
+ ldr r1, [sp, #108] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #108] @ 4-byte Spill
+ ldr r1, [sp, #104] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #104] @ 4-byte Spill
+ ldr r1, [sp, #100] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #92] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adc r1, r1, #0
+ adds r9, r0, r4
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r4, [sp, #264]
+ str r1, [sp, #88] @ 4-byte Spill
+ mul r1, r9, r0
+ ldr r0, [sp, #292]
+ str r1, [sp, #68] @ 4-byte Spill
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #288]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #284]
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #280]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #276]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r10, [sp, #144] @ 4-byte Reload
+ ldr r6, [sp, #140] @ 4-byte Reload
+ adcs r11, r10, r11
+ adcs r10, r6, r8
+ ldr r6, [sp, #84] @ 4-byte Reload
+ adcs r7, r6, r7
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #148] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #136] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #144] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ add r0, sp, #152
+ bl .LmulPv544x32(PLT)
+ add r3, sp, #152
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r9, r0
+ adcs r4, r11, r1
+ ldr r0, [sp, #168]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r6, r10, r2
+ str r4, [sp, #52] @ 4-byte Spill
+ adcs r9, r7, r3
+ mov r3, r5
+ str r6, [sp, #60] @ 4-byte Spill
+ str r9, [sp, #68] @ 4-byte Spill
+ adcs lr, r1, r0
+ ldr r0, [sp, #172]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str lr, [sp, #72] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #176]
+ adcs r0, r1, r0
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #180]
+ adcs r0, r1, r0
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #184]
+ adcs r0, r1, r0
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ adcs r0, r1, r0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ adcs r11, r1, r0
+ ldr r0, [sp, #196]
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r11, [sp, #76] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #200]
+ adcs r0, r8, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #204]
+ adcs r0, r1, r0
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #208]
+ adcs r0, r1, r0
+ ldr r1, [sp, #136] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #212]
+ adcs r0, r1, r0
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #216]
+ adcs r0, r1, r0
+ ldr r1, [sp, #144] @ 4-byte Reload
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #220]
+ adcs r0, r1, r0
+ str r0, [sp, #144] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
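+@ Final conditional subtraction: the modulus limbs are loaded from r3 and
+@ subtracted from the accumulated result with subs/sbcs; the final borrow,
+@ tested via "ands r1, r0, #1", drives the movne sequence below so that either
+@ the reduced value or the original sum is selected before being stored to the
+@ output pointer.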
+ ldm r3, {r1, r2, r7}
+ ldr r0, [r3, #64]
+ ldr r5, [r3, #12]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ subs r12, r4, r1
+ ldr r1, [r3, #40]
+ sbcs r4, r6, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ sbcs r6, r9, r7
+ ldr r7, [r3, #32]
+ ldr r9, [r3, #28]
+ sbcs r10, lr, r5
+ ldr r5, [r3, #16]
+ ldr lr, [r3, #24]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ sbcs r2, r2, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r3, #60]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r3, #20]
+ ldr r3, [sp, #104] @ 4-byte Reload
+ sbcs r3, r3, r0
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs lr, r0, lr
+ ldr r0, [sp, #112] @ 4-byte Reload
+ sbcs r5, r0, r9
+ ldr r0, [sp, #116] @ 4-byte Reload
+ sbcs r8, r0, r7
+ ldr r0, [sp, #44] @ 4-byte Reload
+ ldr r7, [sp, #60] @ 4-byte Reload
+ sbcs r9, r11, r0
+ ldr r0, [sp, #120] @ 4-byte Reload
+ sbcs r11, r0, r1
+ ldr r0, [sp, #124] @ 4-byte Reload
+ ldr r1, [sp, #56] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #92] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ sbcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #144] @ 4-byte Reload
+ sbcs r0, r0, r1
+ str r0, [sp, #148] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbc r0, r0, #0
+ ands r1, r0, #1
+ ldr r0, [sp, #52] @ 4-byte Reload
+ movne r4, r7
+ movne r12, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r12, [r0]
+ str r4, [r0, #4]
+ ldr r4, [sp, #68] @ 4-byte Reload
+ movne r6, r4
+ cmp r1, #0
+ str r6, [r0, #8]
+ ldr r6, [sp, #72] @ 4-byte Reload
+ movne r10, r6
+ ldr r6, [sp, #100] @ 4-byte Reload
+ str r10, [r0, #12]
+ movne r2, r6
+ str r2, [r0, #16]
+ ldr r2, [sp, #104] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #20]
+ ldr r3, [sp, #56] @ 4-byte Reload
+ movne lr, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ str lr, [r0, #24]
+ movne r5, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r5, [r0, #28]
+ movne r8, r2
+ ldr r2, [sp, #76] @ 4-byte Reload
+ cmp r1, #0
+ str r8, [r0, #32]
+ movne r9, r2
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str r9, [r0, #36]
+ movne r11, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ str r11, [r0, #40]
+ movne r3, r2
+ ldr r2, [sp, #128] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #44]
+ ldr r3, [sp, #80] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #132] @ 4-byte Reload
+ str r3, [r0, #48]
+ ldr r3, [sp, #84] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #136] @ 4-byte Reload
+ str r3, [r0, #52]
+ ldr r3, [sp, #88] @ 4-byte Reload
+ movne r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #140] @ 4-byte Reload
+ ldr r2, [sp, #92] @ 4-byte Reload
+ str r3, [r0, #56]
+ movne r2, r1
+ ldr r1, [sp, #144] @ 4-byte Reload
+ str r2, [r0, #60]
+ ldr r2, [sp, #148] @ 4-byte Reload
+ movne r2, r1
+ str r2, [r0, #64]
+ add sp, sp, #556
+ add sp, sp, #2048
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end258:
+ .size mcl_fp_mont17L, .Lfunc_end258-mcl_fp_mont17L
+ .cantunwind
+ .fnend
+
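+@ mcl_fp_montNF17L: Montgomery multiplication for 17-limb (544-bit) operands,
+@ the "NF" variant of the routine. (High-level summary inferred from the code
+@ below.) Its structure mirrors mcl_fp_mont17L above: for each 32-bit word of
+@ the second operand it calls .LmulPv544x32 to form a partial product, folds it
+@ into the stack-spilled accumulator, and interleaves a reduction multiply by
+@ the constant loaded from [r3, #-4].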
+ .globl mcl_fp_montNF17L
+ .align 2
+ .type mcl_fp_montNF17L,%function
+mcl_fp_montNF17L: @ @mcl_fp_montNF17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #548
+ sub sp, sp, #548
+ .pad #2048
+ sub sp, sp, #2048
+ add r12, sp, #132
+ add r6, sp, #2048
+ mov r4, r3
+ stm r12, {r1, r2, r3}
+ str r0, [sp, #92] @ 4-byte Spill
+ add r0, r6, #472
+ ldr r5, [r3, #-4]
+ ldr r2, [r2]
+ str r5, [sp, #128] @ 4-byte Spill
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2520]
+ ldr r1, [sp, #2524]
+ str r0, [sp, #72] @ 4-byte Spill
+ mul r2, r0, r5
+ ldr r0, [sp, #2588]
+ str r1, [sp, #100] @ 4-byte Spill
+ ldr r1, [sp, #2528]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #2584]
+ str r1, [sp, #96] @ 4-byte Spill
+ ldr r1, [sp, #2532]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #2580]
+ str r1, [sp, #88] @ 4-byte Spill
+ mov r1, r4
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #2576]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #2572]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #2568]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #2564]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #2560]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #2556]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #2552]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #2548]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #2544]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2540]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2536]
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #2448
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2516]
+ add r11, sp, #2448
+ ldr r9, [sp, #2476]
+ ldr r4, [sp, #2472]
+ ldr r7, [sp, #2468]
+ ldr r6, [sp, #2464]
+ add lr, sp, #2048
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #2512]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2508]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2504]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2500]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2496]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2492]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2488]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2484]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2480]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r5, [sp, #2460]
+ ldr r2, [r0, #4]
+ add r0, lr, #328
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r0, r8, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r10, r0
+ add r10, sp, #2416
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r5, r0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r7, r0
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r4, r0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r9, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r1, r0
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adc r0, r1, r0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #2444]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2440]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2436]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2432]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #2428]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r7, [sp, #2376]
+ ldr r6, [sp, #100] @ 4-byte Reload
+ ldr r0, [sp, #2380]
+ ldr r1, [sp, #2384]
+ ldr r2, [sp, #2388]
+ ldr r3, [sp, #2392]
+ ldr r12, [sp, #2396]
+ ldr lr, [sp, #2400]
+ ldr r4, [sp, #2404]
+ ldr r5, [sp, #2408]
+ ldr r11, [sp, #2412]
+ adds r7, r6, r7
+ ldr r6, [sp, #96] @ 4-byte Reload
+ str r7, [sp, #24] @ 4-byte Spill
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #2304
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2372]
+ add r11, sp, #2304
+ ldr r4, [sp, #2332]
+ ldr r5, [sp, #2328]
+ ldr r6, [sp, #2324]
+ ldr r7, [sp, #2320]
+ add lr, sp, #2048
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2368]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2364]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2360]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2356]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2352]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #2348]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2344]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2340]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2336]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #8]
+ add r0, lr, #184
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #24] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r8
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #2272
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2300]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2296]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2292]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2288]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2284]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r7, [sp, #2232]
+ ldr r6, [sp, #124] @ 4-byte Reload
+ ldr r0, [sp, #2236]
+ ldr r1, [sp, #2240]
+ ldr r2, [sp, #2244]
+ ldr r3, [sp, #2248]
+ ldr r12, [sp, #2252]
+ ldr lr, [sp, #2256]
+ ldr r4, [sp, #2260]
+ ldr r5, [sp, #2264]
+ ldr r11, [sp, #2268]
+ adds r7, r6, r7
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #2160
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2228]
+ add r11, sp, #2160
+ ldr r4, [sp, #2188]
+ ldr r5, [sp, #2184]
+ ldr r6, [sp, #2180]
+ ldr r7, [sp, #2176]
+ add lr, sp, #2048
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2224]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2220]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2216]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2212]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2208]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2204]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2200]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2196]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2192]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #12]
+ add r0, lr, #40
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r8
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #2128
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2156]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2152]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2148]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2144]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2140]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r7, [sp, #2088]
+ ldr r6, [sp, #124] @ 4-byte Reload
+ ldr r0, [sp, #2092]
+ ldr r1, [sp, #2096]
+ ldr r2, [sp, #2100]
+ ldr r3, [sp, #2104]
+ ldr r12, [sp, #2108]
+ ldr lr, [sp, #2112]
+ ldr r4, [sp, #2116]
+ ldr r5, [sp, #2120]
+ ldr r11, [sp, #2124]
+ adds r7, r6, r7
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #2016
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #2084]
+ add r11, sp, #2016
+ ldr r4, [sp, #2044]
+ ldr r5, [sp, #2040]
+ ldr r6, [sp, #2036]
+ ldr r7, [sp, #2032]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2080]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2076]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2072]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #2068]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #2064]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #2060]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #2056]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #2052]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #2048]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #16]
+ add r0, lr, #920
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r8
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1984
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #2012]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #2008]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #2004]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #2000]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1996]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r7, [sp, #1944]
+ ldr r6, [sp, #124] @ 4-byte Reload
+ ldr r0, [sp, #1948]
+ ldr r1, [sp, #1952]
+ ldr r2, [sp, #1956]
+ ldr r3, [sp, #1960]
+ ldr r12, [sp, #1964]
+ ldr lr, [sp, #1968]
+ ldr r4, [sp, #1972]
+ ldr r5, [sp, #1976]
+ ldr r11, [sp, #1980]
+ adds r7, r6, r7
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1872
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1940]
+ add r11, sp, #1872
+ ldr r4, [sp, #1900]
+ ldr r5, [sp, #1896]
+ ldr r6, [sp, #1892]
+ ldr r7, [sp, #1888]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1936]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1932]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1928]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1924]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1920]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1916]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1912]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1908]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1904]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #20]
+ add r0, lr, #776
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r8
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1840
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1868]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1864]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1860]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1856]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1852]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r7, [sp, #1800]
+ ldr r6, [sp, #124] @ 4-byte Reload
+ ldr r0, [sp, #1804]
+ ldr r1, [sp, #1808]
+ ldr r2, [sp, #1812]
+ ldr r3, [sp, #1816]
+ ldr r12, [sp, #1820]
+ ldr lr, [sp, #1824]
+ ldr r4, [sp, #1828]
+ ldr r5, [sp, #1832]
+ ldr r11, [sp, #1836]
+ adds r7, r6, r7
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1728
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1796]
+ add r11, sp, #1728
+ ldr r4, [sp, #1756]
+ ldr r5, [sp, #1752]
+ ldr r6, [sp, #1748]
+ ldr r7, [sp, #1744]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1792]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1788]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1784]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1780]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1776]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1772]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1768]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1764]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1760]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #24]
+ add r0, lr, #632
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r8
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1696
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1724]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1720]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1716]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1712]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1708]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r7, [sp, #1656]
+ ldr r6, [sp, #124] @ 4-byte Reload
+ ldr r0, [sp, #1660]
+ ldr r1, [sp, #1664]
+ ldr r2, [sp, #1668]
+ ldr r3, [sp, #1672]
+ ldr r12, [sp, #1676]
+ ldr lr, [sp, #1680]
+ ldr r4, [sp, #1684]
+ ldr r5, [sp, #1688]
+ ldr r11, [sp, #1692]
+ adds r7, r6, r7
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1584
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1652]
+ add r11, sp, #1584
+ ldr r4, [sp, #1612]
+ ldr r5, [sp, #1608]
+ ldr r6, [sp, #1604]
+ ldr r7, [sp, #1600]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1648]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1644]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1640]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1636]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1632]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1628]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1624]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1620]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1616]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #28]
+ add r0, lr, #488
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r8
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1552
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1580]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1576]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1572]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1568]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1564]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r7, [sp, #1512]
+ ldr r6, [sp, #124] @ 4-byte Reload
+ ldr r0, [sp, #1516]
+ ldr r1, [sp, #1520]
+ ldr r2, [sp, #1524]
+ ldr r3, [sp, #1528]
+ ldr r12, [sp, #1532]
+ ldr lr, [sp, #1536]
+ ldr r4, [sp, #1540]
+ ldr r5, [sp, #1544]
+ ldr r11, [sp, #1548]
+ adds r7, r6, r7
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1440
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1508]
+ add r11, sp, #1440
+ ldr r4, [sp, #1468]
+ ldr r5, [sp, #1464]
+ ldr r6, [sp, #1460]
+ ldr r7, [sp, #1456]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1504]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1500]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1496]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1492]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1488]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1484]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1480]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1476]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1472]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #32]
+ add r0, lr, #344
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r8
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1408
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1436]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1432]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1428]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1424]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1420]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r7, [sp, #1368]
+ ldr r6, [sp, #124] @ 4-byte Reload
+ ldr r0, [sp, #1372]
+ ldr r1, [sp, #1376]
+ ldr r2, [sp, #1380]
+ ldr r3, [sp, #1384]
+ ldr r12, [sp, #1388]
+ ldr lr, [sp, #1392]
+ ldr r4, [sp, #1396]
+ ldr r5, [sp, #1400]
+ ldr r11, [sp, #1404]
+ adds r7, r6, r7
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #1296
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1364]
+ add r11, sp, #1296
+ ldr r4, [sp, #1324]
+ ldr r5, [sp, #1320]
+ ldr r6, [sp, #1316]
+ ldr r7, [sp, #1312]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1360]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1356]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1352]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1348]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1344]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1340]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1336]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1332]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1328]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #36]
+ add r0, lr, #200
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ adds r0, r0, r8
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1264
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #1292]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1288]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1284]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1280]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1276]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r8, r9, r10}
+ ldr r7, [sp, #1224]
+ ldr r6, [sp, #124] @ 4-byte Reload
+ ldr r0, [sp, #1228]
+ ldr r1, [sp, #1232]
+ ldr r2, [sp, #1236]
+ ldr r3, [sp, #1240]
+ ldr r12, [sp, #1244]
+ ldr lr, [sp, #1248]
+ ldr r4, [sp, #1252]
+ ldr r5, [sp, #1256]
+ ldr r11, [sp, #1260]
+ adds r7, r6, r7
+ ldr r6, [sp, #120] @ 4-byte Reload
+ str r7, [sp, #32] @ 4-byte Spill
+ adcs r0, r6, r0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ mul r2, r7, r5
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #1152
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1220]
+ add r11, sp, #1152
+ ldr r4, [sp, #1176]
+ ldr r6, [sp, #1172]
+ ldr r7, [sp, #1168]
+ add lr, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1216]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1212]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1208]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1204]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1200]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1196]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1192]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1188]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1184]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1180]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #40]
+ add r0, lr, #56
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #32] @ 4-byte Reload
+ ldr r1, [sp, #8] @ 4-byte Reload
+ adds r0, r0, r8
+ ldr r8, [sp, #1092]
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r2, r0, r9
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #1120
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r7
+ ldr r7, [sp, #1084]
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #1088]
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r4
+ ldr r4, [sp, #1080]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #56] @ 4-byte Spill
+ adds r0, r2, r4
+ mul r1, r0, r5
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r1, [sp, #48] @ 4-byte Spill
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #1144]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1140]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1136]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldm r10, {r4, r5, r9, r10}
+ ldr r11, [sp, #120] @ 4-byte Reload
+ ldr r0, [sp, #1096]
+ ldr r1, [sp, #1100]
+ ldr r2, [sp, #1104]
+ ldr r3, [sp, #1108]
+ ldr r12, [sp, #1112]
+ ldr lr, [sp, #1116]
+ adcs r7, r11, r7
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [sp, #116] @ 4-byte Reload
+ adcs r6, r7, r6
+ str r6, [sp, #116] @ 4-byte Spill
+ ldr r6, [sp, #112] @ 4-byte Reload
+ adcs r6, r6, r8
+ str r6, [sp, #112] @ 4-byte Spill
+ ldr r6, [sp, #108] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #1008
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1076]
+ add r11, sp, #1008
+ ldr r4, [sp, #1036]
+ ldr r5, [sp, #1032]
+ ldr r6, [sp, #1028]
+ ldr r7, [sp, #1024]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #1072]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1068]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1064]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1060]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1056]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1052]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1048]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1044]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1040]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldm r11, {r8, r9, r10, r11}
+ ldr r0, [sp, #136] @ 4-byte Reload
+ ldr r1, [sp, #132] @ 4-byte Reload
+ ldr r2, [r0, #44]
+ add r0, sp, #936
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #124] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #952
+ adds r0, r0, r8
+ add r8, sp, #936
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r2, r0, r9
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #976
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #48] @ 4-byte Spill
+ ldm r8, {r4, r6, r7, r8}
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adds r1, r2, r4
+ mul r2, r1, r0
+ ldr r0, [sp, #1004]
+ str r1, [sp, #124] @ 4-byte Spill
+ str r2, [sp, #24] @ 4-byte Spill
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1000]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #996]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #992]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r5, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #120] @ 4-byte Reload
+ adcs r6, r11, r6
+ str r6, [sp, #76] @ 4-byte Spill
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r6, r6, r7
+ str r6, [sp, #72] @ 4-byte Spill
+ ldr r6, [sp, #112] @ 4-byte Reload
+ adcs r6, r6, r8
+ str r6, [sp, #68] @ 4-byte Spill
+ ldr r6, [sp, #108] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #80] @ 4-byte Spill
+ add r0, sp, #864
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #932]
+ ldr r5, [sp, #892]
+ ldr r7, [sp, #888]
+ ldr r4, [sp, #884]
+ ldr r9, [sp, #880]
+ ldr r8, [sp, #864]
+ ldr r11, [sp, #868]
+ ldr r10, [sp, #872]
+ ldr r6, [sp, #876]
+ add r0, sp, #792
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #928]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #924]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #920]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #916]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #912]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #908]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #904]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #900]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #896]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [r1, #48]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #124] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #796
+ adds r0, r0, r8
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r10
+ add r10, sp, #820
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #860]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #856]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #852]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #848]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #844]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #840]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r7, [sp, #792]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #124] @ 4-byte Reload
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adds r7, r11, r7
+ adcs r0, r6, r0
+ str r7, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #720
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #788]
+ add r11, sp, #728
+ ldr r5, [sp, #748]
+ ldr r9, [sp, #744]
+ ldr r10, [sp, #720]
+ ldr r6, [sp, #724]
+ add r0, sp, #648
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #784]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #780]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #776]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #772]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #768]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #764]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #760]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #756]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #752]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r7, r8, r11}
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [r1, #52]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #652
+ adds r0, r0, r10
+ add r10, sp, #676
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #716]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #712]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #708]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #704]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #700]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #696]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r7, [sp, #648]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #80] @ 4-byte Reload
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adds r7, r11, r7
+ adcs r0, r6, r0
+ str r7, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #576
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #644]
+ add r11, sp, #584
+ ldr r5, [sp, #604]
+ ldr r9, [sp, #600]
+ ldr r10, [sp, #576]
+ ldr r6, [sp, #580]
+ add r0, sp, #504
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #640]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #636]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #632]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #628]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #624]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #620]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #616]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #612]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #608]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r7, r8, r11}
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [r1, #56]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #508
+ adds r0, r0, r10
+ add r10, sp, #532
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #572]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #568]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #564]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #560]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #556]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #552]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r7, [sp, #504]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #80] @ 4-byte Reload
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adds r7, r11, r7
+ adcs r0, r6, r0
+ str r7, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #432
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #500]
+ add r11, sp, #440
+ ldr r5, [sp, #460]
+ ldr r9, [sp, #456]
+ ldr r10, [sp, #432]
+ ldr r6, [sp, #436]
+ add r0, sp, #360
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #496]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #492]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #488]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #484]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #480]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #476]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #472]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #468]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #464]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r11, {r4, r7, r8, r11}
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [r1, #60]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #364
+ adds r0, r0, r10
+ add r10, sp, #388
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #428]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #424]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #420]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #416]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #412]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #408]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r10, {r4, r5, r8, r9, r10}
+ ldr r7, [sp, #360]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #80] @ 4-byte Reload
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adds r7, r11, r7
+ adcs r0, r6, r0
+ str r7, [sp, #28] @ 4-byte Spill
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, sp, #288
+ bl .LmulPv544x32(PLT)
+ ldr r1, [sp, #356]
+ add r8, sp, #288
+ ldr r9, [sp, #316]
+ ldr r10, [sp, #312]
+ ldr r11, [sp, #308]
+ ldr r6, [sp, #304]
+ add r0, sp, #216
+ str r1, [sp, #52] @ 4-byte Spill
+ ldr r1, [sp, #352]
+ str r1, [sp, #48] @ 4-byte Spill
+ ldr r1, [sp, #348]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #344]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #340]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #336]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #332]
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [sp, #328]
+ str r1, [sp, #20] @ 4-byte Spill
+ ldr r1, [sp, #324]
+ str r1, [sp, #16] @ 4-byte Spill
+ ldr r1, [sp, #320]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldm r8, {r4, r5, r8}
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r7, [sp, #300]
+ ldr r2, [r1, #64]
+ ldr r1, [sp, #132] @ 4-byte Reload
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #28] @ 4-byte Reload
+ ldr r1, [sp, #12] @ 4-byte Reload
+ add lr, sp, #232
+ adds r0, r0, r4
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r2, r0, r5
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r9
+ add r9, sp, #216
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adc r0, r0, r1
+ str r0, [sp, #84] @ 4-byte Spill
+ ldm r9, {r4, r8, r9}
+ ldr r0, [sp, #128] @ 4-byte Reload
+ ldr r7, [sp, #228]
+ ldr r5, [sp, #260]
+ adds r11, r2, r4
+ ldr r4, [sp, #256]
+ mul r1, r11, r0
+ ldr r0, [sp, #284]
+ str r1, [sp, #64] @ 4-byte Spill
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #280]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #276]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #272]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #268]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #264]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r10, [sp, #136] @ 4-byte Reload
+ ldr r6, [sp, #132] @ 4-byte Reload
+ adcs r8, r10, r8
+ ldr r10, [sp, #140] @ 4-byte Reload
+ adcs r9, r6, r9
+ ldr r6, [sp, #80] @ 4-byte Reload
+ adcs r7, r6, r7
+ ldr r6, [sp, #76] @ 4-byte Reload
+ adcs r0, r6, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #88] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r10
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ add r0, sp, #144
+ bl .LmulPv544x32(PLT)
+ add r3, sp, #144
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r11, r0
+ adcs r4, r8, r1
+ ldr r0, [sp, #160]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r8, r9, r2
+ str r4, [sp, #52] @ 4-byte Spill
+ adcs r9, r7, r3
+ mov r3, r10
+ str r8, [sp, #60] @ 4-byte Spill
+ str r9, [sp, #64] @ 4-byte Spill
+ adcs r5, r1, r0
+ ldr r0, [sp, #164]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r5, [sp, #68] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #168]
+ adcs lr, r1, r0
+ ldr r0, [sp, #172]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str lr, [sp, #48] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #176]
+ adcs r0, r1, r0
+ ldr r1, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #180]
+ adcs r0, r1, r0
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #184]
+ adcs r0, r1, r0
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #188]
+ adcs r0, r1, r0
+ ldr r1, [sp, #124] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #192]
+ adcs r0, r1, r0
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #196]
+ adcs r0, r1, r0
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #200]
+ adcs r0, r6, r0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #204]
+ adcs r0, r1, r0
+ ldr r1, [sp, #136] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #208]
+ adcs r0, r1, r0
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #212]
+ adc r1, r1, r0
+ str r1, [sp, #88] @ 4-byte Spill
+ ldm r3, {r0, r2, r7}
+ ldr r6, [r3, #12]
+ ldr r11, [r3, #36]
+ ldr r10, [r3, #32]
+ subs r12, r4, r0
+ ldr r0, [r3, #64]
+ sbcs r4, r8, r2
+ ldr r2, [sp, #96] @ 4-byte Reload
+ sbcs r8, r9, r7
+ ldr r7, [r3, #20]
+ sbcs r9, r5, r6
+ ldr r6, [r3, #24]
+ ldr r5, [r3, #28]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r3, #60]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ sbcs r2, r2, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ sbcs r3, lr, r7
+ ldr r7, [sp, #56] @ 4-byte Reload
+ sbcs lr, r0, r6
+ ldr r0, [sp, #104] @ 4-byte Reload
+ sbcs r5, r0, r5
+ ldr r0, [sp, #108] @ 4-byte Reload
+ sbcs r6, r0, r10
+ ldr r0, [sp, #112] @ 4-byte Reload
+ sbcs r11, r0, r11
+ ldr r0, [sp, #116] @ 4-byte Reload
+ sbcs r0, r0, r7
+ ldr r7, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ sbcs r0, r0, r7
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ sbcs r0, r0, r7
+ ldr r7, [sp, #80] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ sbcs r0, r0, r7
+ ldr r7, [sp, #84] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ sbcs r0, r0, r7
+ ldr r7, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ sbcs r0, r0, r7
+ ldr r7, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ sbc r10, r1, r0
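+@ r10 now holds the top word of the trial subtraction t - p; its sign bit
+@ (tested via the asr/cmp below) appears to drive the movlt chain that picks,
+@ limb by limb, between t and t - p before the result is stored through the
+@ output pointer reloaded from [sp, #92].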
+ ldr r0, [sp, #52] @ 4-byte Reload
+ asr r1, r10, #31
+ cmp r1, #0
+ movlt r4, r7
+ movlt r12, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r12, [r0]
+ str r4, [r0, #4]
+ ldr r4, [sp, #64] @ 4-byte Reload
+ movlt r8, r4
+ ldr r4, [sp, #68] @ 4-byte Reload
+ cmp r1, #0
+ str r8, [r0, #8]
+ movlt r9, r4
+ ldr r4, [sp, #96] @ 4-byte Reload
+ str r9, [r0, #12]
+ movlt r2, r4
+ str r2, [r0, #16]
+ ldr r2, [sp, #48] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #100] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #20]
+ ldr r3, [sp, #56] @ 4-byte Reload
+ movlt lr, r2
+ ldr r2, [sp, #104] @ 4-byte Reload
+ str lr, [r0, #24]
+ movlt r5, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ str r5, [r0, #28]
+ movlt r6, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ cmp r1, #0
+ str r6, [r0, #32]
+ movlt r11, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r11, [r0, #36]
+ movlt r3, r2
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str r3, [r0, #40]
+ ldr r3, [sp, #72] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #44]
+ ldr r3, [sp, #76] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #128] @ 4-byte Reload
+ str r3, [r0, #48]
+ ldr r3, [sp, #80] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #132] @ 4-byte Reload
+ str r3, [r0, #52]
+ ldr r3, [sp, #84] @ 4-byte Reload
+ movlt r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #136] @ 4-byte Reload
+ ldr r2, [sp, #140] @ 4-byte Reload
+ str r3, [r0, #56]
+ movlt r2, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r2, [r0, #60]
+ movlt r10, r1
+ str r10, [r0, #64]
+ add sp, sp, #548
+ add sp, sp, #2048
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end259:
+ .size mcl_fp_montNF17L, .Lfunc_end259-mcl_fp_montNF17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_montRed17L
+ .align 2
+ .type mcl_fp_montRed17L,%function
+mcl_fp_montRed17L: @ @mcl_fp_montRed17L
+ .fnstart
+@ BB#0:
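+@ Hedged summary, inferred from the surrounding code rather than stated by the
+@ generator: r0 appears to be the result pointer, r1 the 34-limb (2x544-bit)
+@ input, and r2 the 17-limb modulus p, with what looks like -p^-1 mod 2^32
+@ stored in the word just below p. Each round below multiplies p by a quotient
+@ digit via .LmulPv544x32 and folds the product into the running value so the
+@ low limb cancels, presumably finishing with the usual correction against p.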
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #444
+ sub sp, sp, #444
+ .pad #1024
+ sub sp, sp, #1024
+ mov r3, r2
+ str r0, [sp, #212] @ 4-byte Spill
+ ldr r2, [r1, #4]
+ ldr r7, [r1]
+ ldr r0, [r3]
+ str r3, [sp, #236] @ 4-byte Spill
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [r1, #8]
+ str r0, [sp, #200] @ 4-byte Spill
+ ldr r0, [r3, #4]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #12]
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [r3, #8]
+ str r2, [sp, #108] @ 4-byte Spill
+ str r0, [sp, #192] @ 4-byte Spill
+ ldr r0, [r3, #12]
+ str r0, [sp, #176] @ 4-byte Spill
+ ldr r0, [r3, #16]
+ str r0, [sp, #180] @ 4-byte Spill
+ ldr r0, [r3, #20]
+ str r0, [sp, #184] @ 4-byte Spill
+ ldr r0, [r3, #24]
+ str r0, [sp, #188] @ 4-byte Spill
+ ldr r0, [r3, #-4]
+ str r0, [sp, #232] @ 4-byte Spill
+ mul r2, r7, r0
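+@ r0 was loaded from [r3, #-4], the word kept just below the modulus; in mcl
+@ this is presumably rp = -p^-1 mod 2^32, so r2 = x[0] * rp is the quotient
+@ digit for the first reduction round.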
+ ldr r0, [r3, #60]
+ str r0, [sp, #204] @ 4-byte Spill
+ ldr r0, [r3, #64]
+ str r0, [sp, #208] @ 4-byte Spill
+ ldr r0, [r3, #28]
+ str r0, [sp, #148] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #152] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #156] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #160] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #164] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #168] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #172] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp, #144] @ 4-byte Spill
+ ldr r0, [r1, #128]
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [r1, #132]
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [r1, #96]
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [r1, #104]
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [r1, #108]
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [r1, #112]
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [r1, #116]
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [r1, #120]
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [r1, #124]
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [r1, #100]
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r1, #68]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r1, #72]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r1, #76]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r1, #80]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r1, #84]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r1, #88]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r1, #92]
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [r1, #32]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [r1, #36]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [r1, #40]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [r1, #44]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #28]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [r1, #24]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [r1, #20]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r1, #16]
+ mov r1, r3
+ str r0, [sp, #20] @ 4-byte Spill
+ add r0, sp, #1392
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1460]
+ ldr r11, [sp, #1392]
+ ldr r1, [sp, #1400]
+ ldr r2, [sp, #1404]
+ ldr r3, [sp, #1408]
+ ldr r12, [sp, #1412]
+ ldr lr, [sp, #1416]
+ ldr r4, [sp, #1420]
+ ldr r5, [sp, #1424]
+ ldr r6, [sp, #1428]
+ ldr r8, [sp, #1432]
+ ldr r9, [sp, #1436]
+ ldr r10, [sp, #1440]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1456]
+ adds r7, r7, r11
+ ldr r7, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1452]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1448]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1444]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1396]
+ adcs r7, r7, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #20] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #236] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ add r9, sp, #1024
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ mov r0, #0
+ adc r0, r0, #0
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #232] @ 4-byte Reload
+ mul r2, r7, r0
+ add r0, r9, #296
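+@ r9 was set to sp + 1024 above, so r0 = sp + 1320, the output buffer for the
+@ next .LmulPv544x32 call; the two-step add is presumably used because #1320
+@ is not encodable as a single ARM immediate.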
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1388]
+ ldr r9, [sp, #1320]
+ ldr r1, [sp, #1328]
+ ldr r2, [sp, #1332]
+ ldr r3, [sp, #1336]
+ ldr r12, [sp, #1340]
+ ldr r10, [sp, #1344]
+ ldr lr, [sp, #1348]
+ ldr r4, [sp, #1352]
+ ldr r5, [sp, #1356]
+ ldr r8, [sp, #1360]
+ ldr r11, [sp, #1364]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1384]
+ adds r7, r7, r9
+ ldr r7, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1380]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1376]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1372]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1368]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #1324]
+ adcs r7, r7, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #232] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ mul r2, r7, r5
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #28] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #32] @ 4-byte Spill
+ add r0, sp, #1248
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1316]
+ add r10, sp, #1280
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1312]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1308]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1304]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1300]
+ str r0, [sp, #12] @ 4-byte Spill
+ ldr r0, [sp, #1296]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldm r10, {r4, r6, r9, r10}
+ ldr r8, [sp, #1248]
+ ldr r0, [sp, #1252]
+ ldr r1, [sp, #1256]
+ ldr r2, [sp, #1260]
+ ldr r3, [sp, #1264]
+ ldr r12, [sp, #1268]
+ ldr lr, [sp, #1272]
+ ldr r11, [sp, #1276]
+ adds r7, r7, r8
+ ldr r7, [sp, #116] @ 4-byte Reload
+ adcs r7, r7, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r7, r5
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ add r9, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #12] @ 4-byte Reload
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #236] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #32] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #36] @ 4-byte Spill
+ add r0, r9, #152
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1244]
+ ldr r9, [sp, #1176]
+ ldr r1, [sp, #1184]
+ ldr r2, [sp, #1188]
+ ldr r3, [sp, #1192]
+ ldr r12, [sp, #1196]
+ ldr lr, [sp, #1200]
+ ldr r4, [sp, #1204]
+ ldr r5, [sp, #1208]
+ ldr r6, [sp, #1212]
+ ldr r8, [sp, #1216]
+ ldr r10, [sp, #1220]
+ ldr r11, [sp, #1224]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1240]
+ adds r7, r7, r9
+ ldr r7, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1236]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1232]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1228]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1180]
+ adcs r7, r7, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ str r7, [sp, #12] @ 4-byte Spill
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #236] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #232] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ mul r2, r7, r6
+ adcs r0, r0, r8
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #36] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #40] @ 4-byte Spill
+ add r0, sp, #1104
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1172]
+ ldr r4, [sp, #1104]
+ ldr r9, [sp, #12] @ 4-byte Reload
+ ldr r1, [sp, #1112]
+ ldr r2, [sp, #1116]
+ ldr r3, [sp, #1120]
+ ldr r12, [sp, #1124]
+ ldr r10, [sp, #1128]
+ ldr r11, [sp, #1132]
+ ldr lr, [sp, #1136]
+ ldr r7, [sp, #1140]
+ ldr r8, [sp, #1144]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1168]
+ adds r4, r9, r4
+ ldr r4, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1164]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1160]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1156]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #1152]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #1148]
+ str r0, [sp, #8] @ 4-byte Spill
+ ldr r0, [sp, #1108]
+ adcs r4, r4, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #8] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r4, r6
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ mov r7, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r8
+ add r8, sp, #1024
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r5
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #40] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #44] @ 4-byte Spill
+ add r0, r8, #8
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1100]
+ ldr r8, [sp, #1032]
+ ldr r1, [sp, #1040]
+ ldr r2, [sp, #1044]
+ ldr r3, [sp, #1048]
+ ldr r12, [sp, #1052]
+ ldr lr, [sp, #1056]
+ ldr r4, [sp, #1060]
+ ldr r5, [sp, #1064]
+ ldr r6, [sp, #1068]
+ ldr r9, [sp, #1072]
+ ldr r10, [sp, #1076]
+ ldr r11, [sp, #1080]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1096]
+ adds r7, r7, r8
+ ldr r7, [sp, #116] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1092]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1088]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1084]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1036]
+ adcs r7, r7, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ str r7, [sp, #20] @ 4-byte Spill
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #232] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ mul r2, r7, r5
+ adcs r0, r0, r6
+ ldr r6, [sp, #236] @ 4-byte Reload
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #48] @ 4-byte Spill
+ add r0, sp, #960
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #1028]
+ add lr, sp, #984
+ add r12, sp, #964
+ ldr r8, [sp, #1000]
+ ldr r7, [sp, #996]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #1024]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #1020]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #1016]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #1012]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #1008]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #1004]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldm lr, {r10, r11, lr}
+ ldr r4, [sp, #960]
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r9, [sp, #20] @ 4-byte Reload
+ adds r4, r9, r4
+ ldr r4, [sp, #116] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r4, r5
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r7
+ mov r7, r4
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #48] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ add r0, sp, #888
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #956]
+ add r11, sp, #916
+ add lr, sp, #892
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #952]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #948]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #944]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #940]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldm r11, {r4, r5, r6, r9, r10, r11}
+ ldr r8, [sp, #888]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r7, r7, r8
+ ldr r7, [sp, #116] @ 4-byte Reload
+ adcs r7, r7, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ str r7, [sp, #28] @ 4-byte Spill
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #232] @ 4-byte Reload
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ mul r2, r7, r5
+ ldr r7, [sp, #236] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r11
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r7
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ add r0, sp, #816
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #884]
+ add lr, sp, #840
+ add r12, sp, #820
+ ldr r8, [sp, #856]
+ ldr r6, [sp, #852]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #880]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #876]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #872]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #868]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #864]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #860]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm lr, {r10, r11, lr}
+ ldr r4, [sp, #816]
+ ldm r12, {r0, r1, r2, r3, r12}
+ ldr r9, [sp, #28] @ 4-byte Reload
+ adds r4, r9, r4
+ ldr r4, [sp, #116] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r4, r5
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r11
+ mov r11, r4
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r7
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ add r0, sp, #744
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #812]
+ add r10, sp, #768
+ add lr, sp, #744
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #808]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #804]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #800]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #796]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #232] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ mul r2, r11, r5
+ adcs r0, r0, r6
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #236] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #64] @ 4-byte Spill
+ add r0, sp, #672
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #740]
+ add r9, sp, #704
+ add r12, sp, #676
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #736]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #732]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #728]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #724]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #720]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldm r9, {r6, r7, r8, r9}
+ ldr r4, [sp, #672]
+ ldr lr, [sp, #700]
+ ldr r10, [sp, #696]
+ ldm r12, {r0, r1, r2, r3, r12}
+ adds r4, r11, r4
+ ldr r4, [sp, #116] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r4, [sp, #236] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r5
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #68] @ 4-byte Spill
+ add r0, sp, #600
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #668]
+ add r10, sp, #624
+ add lr, sp, #600
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #664]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #660]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #656]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #652]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #648]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r9
+ ldr r9, [sp, #232] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ mul r2, r11, r9
+ adcs r0, r0, r10
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r4
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #72] @ 4-byte Spill
+ add r0, sp, #528
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #596]
+ add r8, sp, #560
+ add r12, sp, #532
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #592]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #588]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #584]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #580]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #576]
+ str r0, [sp, #48] @ 4-byte Spill
+ ldm r8, {r5, r6, r7, r8}
+ ldr r4, [sp, #528]
+ ldr lr, [sp, #556]
+ ldr r10, [sp, #552]
+ ldm r12, {r0, r1, r2, r3, r12}
+ adds r4, r11, r4
+ ldr r4, [sp, #116] @ 4-byte Reload
+ adcs r11, r4, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ mov r4, r9
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #236] @ 4-byte Reload
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ add r0, sp, #456
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #524]
+ add r10, sp, #480
+ add lr, sp, #456
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #520]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #516]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #512]
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #508]
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #504]
+ str r0, [sp, #52] @ 4-byte Spill
+ ldm r10, {r5, r6, r7, r8, r9, r10}
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r0, r11, r0
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #112] @ 4-byte Reload
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, r2
+ mul r2, r11, r4
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r6
+ ldr r6, [sp, #236] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r7
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #60] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #56] @ 4-byte Spill
+ ldr r0, [sp, #228] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #52] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #76] @ 4-byte Spill
+ add r0, sp, #384
+ bl .LmulPv544x32(PLT)
+ ldr r0, [sp, #452]
+ add r10, sp, #412
+ add lr, sp, #388
+ str r0, [sp, #48] @ 4-byte Spill
+ ldr r0, [sp, #448]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [sp, #444]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #440]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #436]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #432]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldm r10, {r5, r7, r8, r9, r10}
+ ldr r4, [sp, #384]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ adds r4, r11, r4
+ ldr r4, [sp, #116] @ 4-byte Reload
+ adcs r4, r4, r0
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #108] @ 4-byte Reload
+ ldr r1, [sp, #24] @ 4-byte Reload
+ adcs r0, r0, r2
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r5
+ ldr r5, [sp, #232] @ 4-byte Reload
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ mul r2, r4, r5
+ adcs r0, r0, r7
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #104] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r10
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #128] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #124] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #120] @ 4-byte Spill
+ ldr r0, [sp, #68] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #116] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r0, [sp, #112] @ 4-byte Spill
+ ldr r0, [sp, #60] @ 4-byte Reload
+ adcs r0, r0, r1
+ mov r1, r6
+ str r0, [sp, #108] @ 4-byte Spill
+ ldr r0, [sp, #56] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #104] @ 4-byte Spill
+ ldr r0, [sp, #52] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #96] @ 4-byte Spill
+ add r0, sp, #312
+ bl .LmulPv544x32(PLT)
+ add r6, sp, #312
+ add r10, sp, #356
+ add lr, sp, #328
+ ldm r6, {r0, r1, r3, r6}
+ adds r0, r4, r0
+ adcs r7, r11, r1
+ mul r0, r7, r5
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #380]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adcs r0, r0, r3
+ str r0, [sp, #232] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #376]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r8, r10}
+ ldr r9, [sp, #352]
+ ldm lr, {r0, r1, r2, r3, r12, lr}
+ ldr r11, [sp, #228] @ 4-byte Reload
+ adcs r0, r11, r0
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #224] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #220] @ 4-byte Reload
+ adcs r0, r0, r2
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #216] @ 4-byte Reload
+ adcs r11, r0, r3
+ ldr r0, [sp, #140] @ 4-byte Reload
+ adcs r0, r0, r12
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #136] @ 4-byte Reload
+ adcs r0, r0, lr
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #132] @ 4-byte Reload
+ adcs r0, r0, r9
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adcs r0, r0, r4
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #124] @ 4-byte Reload
+ adcs r0, r0, r5
+ str r0, [sp, #136] @ 4-byte Spill
+ ldr r0, [sp, #120] @ 4-byte Reload
+ adcs r0, r0, r6
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #116] @ 4-byte Reload
+ adcs r0, r0, r8
+ str r0, [sp, #140] @ 4-byte Spill
+ ldr r0, [sp, #112] @ 4-byte Reload
+ adcs r10, r0, r10
+ ldr r0, [sp, #108] @ 4-byte Reload
+ adcs r8, r0, r1
+ ldr r0, [sp, #104] @ 4-byte Reload
+ ldr r1, [sp, #80] @ 4-byte Reload
+ adcs r6, r0, r1
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ldr r1, [sp, #236] @ 4-byte Reload
+ adcs r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #128] @ 4-byte Spill
+ add r0, sp, #240
+ bl .LmulPv544x32(PLT)
+ add r3, sp, #240
+ ldm r3, {r0, r1, r2, r3}
+ adds r0, r7, r0
+ ldr r0, [sp, #232] @ 4-byte Reload
+ adcs r9, r0, r1
+ ldr r0, [sp, #92] @ 4-byte Reload
+ ldr r1, [sp, #72] @ 4-byte Reload
+ str r9, [sp, #100] @ 4-byte Spill
+ adcs r12, r0, r2
+ ldr r0, [sp, #68] @ 4-byte Reload
+ str r12, [sp, #104] @ 4-byte Spill
+ adcs lr, r0, r3
+ ldr r0, [sp, #256]
+ str lr, [sp, #108] @ 4-byte Spill
+ adcs r4, r1, r0
+ ldr r0, [sp, #260]
+ ldr r1, [sp, #76] @ 4-byte Reload
+ str r4, [sp, #112] @ 4-byte Spill
+ adcs r5, r1, r0
+ ldr r0, [sp, #264]
+ ldr r1, [sp, #84] @ 4-byte Reload
+ str r5, [sp, #116] @ 4-byte Spill
+ adcs r11, r11, r0
+ ldr r0, [sp, #268]
+ str r11, [sp, #120] @ 4-byte Spill
+ adcs r7, r1, r0
+ ldr r0, [sp, #272]
+ ldr r1, [sp, #224] @ 4-byte Reload
+ str r7, [sp, #124] @ 4-byte Spill
+ adcs r0, r1, r0
+ ldr r1, [sp, #220] @ 4-byte Reload
+ str r0, [sp, #224] @ 4-byte Spill
+ ldr r0, [sp, #276]
+ adcs r0, r1, r0
+ ldr r1, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #220] @ 4-byte Spill
+ ldr r0, [sp, #280]
+ adcs r0, r1, r0
+ ldr r1, [sp, #136] @ 4-byte Reload
+ str r0, [sp, #216] @ 4-byte Spill
+ ldr r0, [sp, #284]
+ adcs r0, r1, r0
+ ldr r1, [sp, #228] @ 4-byte Reload
+ str r0, [sp, #232] @ 4-byte Spill
+ ldr r0, [sp, #288]
+ adcs r0, r1, r0
+ ldr r1, [sp, #140] @ 4-byte Reload
+ str r0, [sp, #228] @ 4-byte Spill
+ ldr r0, [sp, #292]
+ adcs r0, r1, r0
+ ldr r1, [sp, #132] @ 4-byte Reload
+ str r0, [sp, #236] @ 4-byte Spill
+ ldr r0, [sp, #296]
+ adcs r10, r10, r0
+ ldr r0, [sp, #300]
+ str r10, [sp, #136] @ 4-byte Spill
+ adcs r8, r8, r0
+ ldr r0, [sp, #304]
+ str r8, [sp, #140] @ 4-byte Spill
+ adcs r6, r6, r0
+ ldr r0, [sp, #308]
+ adcs r2, r1, r0
+ ldr r0, [sp, #128] @ 4-byte Reload
+ adc r0, r0, #0
+ str r0, [sp, #132] @ 4-byte Spill
+ ldr r0, [sp, #200] @ 4-byte Reload
+ subs r1, r9, r0
+ ldr r0, [sp, #196] @ 4-byte Reload
+ sbcs r3, r12, r0
+ ldr r0, [sp, #192] @ 4-byte Reload
+ sbcs r12, lr, r0
+ ldr r0, [sp, #176] @ 4-byte Reload
+ sbcs lr, r4, r0
+ ldr r0, [sp, #180] @ 4-byte Reload
+ sbcs r4, r5, r0
+ ldr r0, [sp, #184] @ 4-byte Reload
+ sbcs r5, r11, r0
+ ldr r0, [sp, #188] @ 4-byte Reload
+ ldr r11, [sp, #224] @ 4-byte Reload
+ sbcs r9, r7, r0
+ ldr r0, [sp, #148] @ 4-byte Reload
+ ldr r7, [sp, #220] @ 4-byte Reload
+ sbcs r0, r11, r0
+ ldr r11, [sp, #232] @ 4-byte Reload
+ str r0, [sp, #176] @ 4-byte Spill
+ ldr r0, [sp, #144] @ 4-byte Reload
+ sbcs r0, r7, r0
+ ldr r7, [sp, #216] @ 4-byte Reload
+ str r0, [sp, #180] @ 4-byte Spill
+ ldr r0, [sp, #152] @ 4-byte Reload
+ sbcs r0, r7, r0
+ ldr r7, [sp, #228] @ 4-byte Reload
+ str r0, [sp, #184] @ 4-byte Spill
+ ldr r0, [sp, #156] @ 4-byte Reload
+ sbcs r0, r11, r0
+ ldr r11, [sp, #236] @ 4-byte Reload
+ str r0, [sp, #188] @ 4-byte Spill
+ ldr r0, [sp, #160] @ 4-byte Reload
+ sbcs r0, r7, r0
+ str r0, [sp, #192] @ 4-byte Spill
+ ldr r0, [sp, #164] @ 4-byte Reload
+ sbcs r0, r11, r0
+ str r0, [sp, #196] @ 4-byte Spill
+ ldr r0, [sp, #168] @ 4-byte Reload
+ sbcs r0, r10, r0
+ mov r10, r6
+ str r0, [sp, #200] @ 4-byte Spill
+ ldr r0, [sp, #172] @ 4-byte Reload
+ sbcs r7, r8, r0
+ ldr r0, [sp, #204] @ 4-byte Reload
+ mov r8, r2
+ sbcs r11, r6, r0
+ ldr r0, [sp, #208] @ 4-byte Reload
+ sbcs r6, r2, r0
+ ldr r0, [sp, #132] @ 4-byte Reload
+ sbc r2, r0, #0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ ands r2, r2, #1
+ movne r1, r0
+ ldr r0, [sp, #212] @ 4-byte Reload
+ str r1, [r0]
+ ldr r1, [sp, #104] @ 4-byte Reload
+ movne r3, r1
+ ldr r1, [sp, #108] @ 4-byte Reload
+ str r3, [r0, #4]
+ ldr r3, [sp, #176] @ 4-byte Reload
+ movne r12, r1
+ ldr r1, [sp, #112] @ 4-byte Reload
+ cmp r2, #0
+ str r12, [r0, #8]
+ movne lr, r1
+ ldr r1, [sp, #116] @ 4-byte Reload
+ str lr, [r0, #12]
+ movne r4, r1
+ ldr r1, [sp, #120] @ 4-byte Reload
+ str r4, [r0, #16]
+ movne r5, r1
+ ldr r1, [sp, #124] @ 4-byte Reload
+ cmp r2, #0
+ str r5, [r0, #20]
+ movne r9, r1
+ ldr r1, [sp, #224] @ 4-byte Reload
+ str r9, [r0, #24]
+ movne r3, r1
+ ldr r1, [sp, #220] @ 4-byte Reload
+ str r3, [r0, #28]
+ ldr r3, [sp, #180] @ 4-byte Reload
+ movne r3, r1
+ ldr r1, [sp, #216] @ 4-byte Reload
+ cmp r2, #0
+ str r3, [r0, #32]
+ ldr r3, [sp, #184] @ 4-byte Reload
+ movne r3, r1
+ ldr r1, [sp, #232] @ 4-byte Reload
+ str r3, [r0, #36]
+ ldr r3, [sp, #188] @ 4-byte Reload
+ movne r3, r1
+ ldr r1, [sp, #228] @ 4-byte Reload
+ str r3, [r0, #40]
+ ldr r3, [sp, #192] @ 4-byte Reload
+ movne r3, r1
+ ldr r1, [sp, #236] @ 4-byte Reload
+ cmp r2, #0
+ str r3, [r0, #44]
+ ldr r3, [sp, #196] @ 4-byte Reload
+ movne r3, r1
+ ldr r1, [sp, #200] @ 4-byte Reload
+ str r3, [r0, #48]
+ ldr r3, [sp, #136] @ 4-byte Reload
+ movne r1, r3
+ str r1, [r0, #52]
+ ldr r1, [sp, #140] @ 4-byte Reload
+ movne r7, r1
+ cmp r2, #0
+ movne r11, r10
+ movne r6, r8
+ str r7, [r0, #56]
+ str r11, [r0, #60]
+ str r6, [r0, #64]
+ add sp, sp, #444
+ add sp, sp, #1024
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end260:
+ .size mcl_fp_montRed17L, .Lfunc_end260-mcl_fp_montRed17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addPre17L
+ .align 2
+ .type mcl_fp_addPre17L,%function
+mcl_fp_addPre17L: @ @mcl_fp_addPre17L
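+@ 17-limb (17 x 32-bit = 544-bit) addition without reduction:
+@ r0[0..16] = r1[0..16] + r2[0..16]; the final carry (0 or 1) is returned in r0.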
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #76
+ sub sp, sp, #76
+ ldm r1, {r3, lr}
+ ldr r8, [r1, #8]
+ ldr r5, [r1, #12]
+ ldm r2, {r6, r7, r12}
+ ldr r4, [r2, #12]
+ add r10, r2, #16
+ adds r3, r6, r3
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2, #64]
+ str r3, [sp, #72] @ 4-byte Spill
+ adcs r3, r7, lr
+ add lr, r1, #16
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r2, #32]
+ adcs r6, r12, r8
+ adcs r8, r4, r5
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [r2, #36]
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r3, [sp, #44] @ 4-byte Spill
+ ldr r3, [r2, #44]
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [r2, #48]
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [r2, #52]
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [r2, #56]
+ str r3, [sp, #60] @ 4-byte Spill
+ ldr r3, [r2, #60]
+ str r3, [sp, #64] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ str r3, [sp, #24] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r2, [r1, #64]
+ ldr r11, [r1, #60]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #36]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldm lr, {r1, r2, r3, r12, lr}
+ ldr r9, [sp, #32] @ 4-byte Reload
+ ldr r7, [sp, #28] @ 4-byte Reload
+ adcs r1, r4, r1
+ str r9, [r0]
+ str r7, [r0, #4]
+ str r6, [r0, #8]
+ str r8, [r0, #12]
+ ldr r7, [sp, #8] @ 4-byte Reload
+ ldr r6, [sp, #12] @ 4-byte Reload
+ ldr r4, [sp, #20] @ 4-byte Reload
+ adcs r2, r5, r2
+ str r1, [r0, #16]
+ ldr r5, [sp, #16] @ 4-byte Reload
+ adcs r1, r10, r3
+ str r2, [r0, #20]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ ldr r3, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adcs r2, r2, r12
+ adcs r12, r1, lr
+ str r2, [r0, #28]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ ldr r2, [sp] @ 4-byte Reload
+ str r12, [r0, #32]
+ add r12, r0, #36
+ adcs r2, r1, r2
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r3, r1, r3
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r7, r1, r7
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r5, r1, r5
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r4, r1, r4
+ ldr r1, [sp, #64] @ 4-byte Reload
+ stm r12, {r2, r3, r7}
+ str r6, [r0, #48]
+ str r5, [r0, #52]
+ str r4, [r0, #56]
+ ldr r2, [sp, #68] @ 4-byte Reload
+ adcs r1, r1, r11
+ str r1, [r0, #60]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [r0, #64]
+ mov r0, #0
+ adc r0, r0, #0
+ add sp, sp, #76
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end261:
+ .size mcl_fp_addPre17L, .Lfunc_end261-mcl_fp_addPre17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subPre17L
+ .align 2
+ .type mcl_fp_subPre17L,%function
+mcl_fp_subPre17L: @ @mcl_fp_subPre17L
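+@ 17-limb subtraction without reduction:
+@ r0[0..16] = r1[0..16] - r2[0..16]; the borrow (0 or 1) is returned in r0.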
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #76
+ sub sp, sp, #76
+ ldm r2, {r3, lr}
+ ldr r8, [r2, #8]
+ ldr r5, [r2, #12]
+ ldm r1, {r6, r7, r12}
+ ldr r4, [r1, #12]
+ add r10, r2, #16
+ subs r3, r6, r3
+ str r3, [sp, #32] @ 4-byte Spill
+ ldr r3, [r2, #64]
+ str r3, [sp, #72] @ 4-byte Spill
+ sbcs r3, r7, lr
+ add lr, r1, #16
+ str r3, [sp, #28] @ 4-byte Spill
+ ldr r3, [r2, #32]
+ sbcs r6, r12, r8
+ sbcs r8, r4, r5
+ str r3, [sp, #36] @ 4-byte Spill
+ ldr r3, [r2, #36]
+ str r3, [sp, #40] @ 4-byte Spill
+ ldr r3, [r2, #40]
+ str r3, [sp, #44] @ 4-byte Spill
+ ldr r3, [r2, #44]
+ str r3, [sp, #48] @ 4-byte Spill
+ ldr r3, [r2, #48]
+ str r3, [sp, #52] @ 4-byte Spill
+ ldr r3, [r2, #52]
+ str r3, [sp, #56] @ 4-byte Spill
+ ldr r3, [r2, #56]
+ str r3, [sp, #60] @ 4-byte Spill
+ ldr r3, [r2, #60]
+ str r3, [sp, #64] @ 4-byte Spill
+ ldr r3, [r2, #28]
+ str r3, [sp, #24] @ 4-byte Spill
+ ldm r10, {r4, r5, r10}
+ ldr r2, [r1, #64]
+ ldr r11, [r1, #60]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #36]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #40]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #48]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #52]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r1, #56]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldm lr, {r1, r2, r3, r12, lr}
+ ldr r9, [sp, #32] @ 4-byte Reload
+ ldr r7, [sp, #28] @ 4-byte Reload
+ sbcs r1, r1, r4
+ str r9, [r0]
+ str r7, [r0, #4]
+ str r6, [r0, #8]
+ str r8, [r0, #12]
+ ldr r7, [sp, #8] @ 4-byte Reload
+ ldr r6, [sp, #12] @ 4-byte Reload
+ ldr r4, [sp, #20] @ 4-byte Reload
+ sbcs r2, r2, r5
+ str r1, [r0, #16]
+ ldr r5, [sp, #16] @ 4-byte Reload
+ sbcs r1, r3, r10
+ str r2, [r0, #20]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ ldr r3, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ sbcs r2, r12, r2
+ sbcs r12, lr, r1
+ str r2, [r0, #28]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ ldr r2, [sp] @ 4-byte Reload
+ str r12, [r0, #32]
+ add r12, r0, #36
+ sbcs r2, r2, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ sbcs r3, r3, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ sbcs r7, r7, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ sbcs r6, r6, r1
+ ldr r1, [sp, #56] @ 4-byte Reload
+ sbcs r5, r5, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ sbcs r4, r4, r1
+ ldr r1, [sp, #64] @ 4-byte Reload
+ stm r12, {r2, r3, r7}
+ str r6, [r0, #48]
+ str r5, [r0, #52]
+ str r4, [r0, #56]
+ ldr r2, [sp, #68] @ 4-byte Reload
+ sbcs r1, r11, r1
+ str r1, [r0, #60]
+ ldr r1, [sp, #72] @ 4-byte Reload
+ sbcs r1, r2, r1
+ str r1, [r0, #64]
+ mov r0, #0
+ sbc r0, r0, #0
+ and r0, r0, #1
+ add sp, sp, #76
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end262:
+ .size mcl_fp_subPre17L, .Lfunc_end262-mcl_fp_subPre17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_shr1_17L
+ .align 2
+ .type mcl_fp_shr1_17L,%function
+mcl_fp_shr1_17L: @ @mcl_fp_shr1_17L
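+@ Logical right shift by one bit of the 17-limb value at r1, result stored at r0.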
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #20
+ sub sp, sp, #20
+ ldr r4, [r1, #4]
+ ldr r3, [r1, #8]
+ add r9, r1, #32
+ ldr r2, [r1, #12]
+ ldr r11, [r1]
+ lsr r7, r4, #1
+ lsr lr, r2, #1
+ lsrs r2, r2, #1
+ orr r10, r7, r3, lsl #31
+ ldr r7, [r1, #64]
+ rrx r12, r3
+ lsrs r3, r4, #1
+ add r4, r1, #16
+ rrx r11, r11
+ str r7, [sp, #16] @ 4-byte Spill
+ ldm r9, {r5, r7, r9}
+ ldr r6, [r1, #48]
+ ldr r8, [r1, #44]
+ str r6, [sp] @ 4-byte Spill
+ ldr r6, [r1, #52]
+ str r6, [sp, #4] @ 4-byte Spill
+ ldr r6, [r1, #56]
+ str r6, [sp, #8] @ 4-byte Spill
+ ldr r6, [r1, #60]
+ str r6, [sp, #12] @ 4-byte Spill
+ ldm r4, {r1, r2, r3, r4}
+ str r11, [r0]
+ stmib r0, {r10, r12}
+ orr r6, lr, r1, lsl #31
+ str r6, [r0, #12]
+ lsrs r6, r2, #1
+ rrx r1, r1
+ str r1, [r0, #16]
+ lsr r1, r2, #1
+ ldr r2, [sp, #4] @ 4-byte Reload
+ orr r1, r1, r3, lsl #31
+ str r1, [r0, #20]
+ lsrs r1, r4, #1
+ rrx r1, r3
+ ldr r3, [sp] @ 4-byte Reload
+ str r1, [r0, #24]
+ lsr r1, r4, #1
+ orr r1, r1, r5, lsl #31
+ str r1, [r0, #28]
+ lsrs r1, r7, #1
+ rrx r1, r5
+ str r1, [r0, #32]
+ lsr r1, r7, #1
+ orr r1, r1, r9, lsl #31
+ str r1, [r0, #36]
+ lsrs r1, r8, #1
+ rrx r1, r9
+ str r1, [r0, #40]
+ lsr r1, r8, #1
+ orr r1, r1, r3, lsl #31
+ str r1, [r0, #44]
+ lsrs r1, r2, #1
+ rrx r1, r3
+ ldr r3, [sp, #8] @ 4-byte Reload
+ str r1, [r0, #48]
+ lsr r1, r2, #1
+ ldr r2, [sp, #12] @ 4-byte Reload
+ orr r1, r1, r3, lsl #31
+ str r1, [r0, #52]
+ lsrs r1, r2, #1
+ rrx r1, r3
+ str r1, [r0, #56]
+ lsr r1, r2, #1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ orr r1, r1, r2, lsl #31
+ str r1, [r0, #60]
+ lsr r1, r2, #1
+ str r1, [r0, #64]
+ add sp, sp, #20
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end263:
+ .size mcl_fp_shr1_17L, .Lfunc_end263-mcl_fp_shr1_17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_add17L
+ .align 2
+ .type mcl_fp_add17L,%function
+mcl_fp_add17L: @ @mcl_fp_add17L
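+@ Modular addition of the 17-limb operands at r1 and r2 with the modulus at r3:
+@ the raw sum is stored, then the modulus is subtracted once and the reduced
+@ result kept when that subtraction does not borrow.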
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #64
+ sub sp, sp, #64
+ ldr r9, [r1]
+ ldmib r1, {r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r7}
+ adds r9, r4, r9
+ ldr r4, [r1, #24]
+ adcs r5, r5, r8
+ mov r8, r9
+ adcs r6, r6, lr
+ str r5, [sp, #32] @ 4-byte Spill
+ ldr r5, [r1, #20]
+ str r8, [r0]
+ adcs r7, r7, r12
+ str r6, [sp, #28] @ 4-byte Spill
+ ldr r6, [r1, #16]
+ ldr lr, [sp, #32] @ 4-byte Reload
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r2, #16]
+ str lr, [r0, #4]
+ adcs r10, r7, r6
+ ldr r7, [r2, #20]
+ ldr r6, [r2, #28]
+ str r10, [sp, #4] @ 4-byte Spill
+ adcs r7, r7, r5
+ ldr r5, [r2, #44]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ ldr r9, [sp, #20] @ 4-byte Reload
+ adcs r7, r7, r4
+ ldr r4, [r2, #48]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r1, #28]
+ adcs r7, r6, r7
+ ldr r6, [r2, #32]
+ str r7, [sp, #12] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ adcs r7, r6, r7
+ ldr r6, [r2, #36]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r1, #36]
+ adcs r7, r6, r7
+ ldr r6, [r2, #40]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r1, #40]
+ adcs r7, r6, r7
+ ldr r6, [r1, #44]
+ str r7, [sp, #52] @ 4-byte Spill
+ adcs r7, r5, r6
+ ldr r5, [r1, #48]
+ ldr r6, [r2, #56]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ adcs r11, r4, r5
+ ldr r4, [r1, #52]
+ ldr r5, [sp, #24] @ 4-byte Reload
+ str r11, [sp, #8] @ 4-byte Spill
+ adcs r7, r7, r4
+ ldr r4, [sp, #28] @ 4-byte Reload
+ str r7, [sp, #44] @ 4-byte Spill
+ ldr r7, [r1, #56]
+ str r4, [r0, #8]
+ str r5, [r0, #12]
+ str r10, [r0, #16]
+ str r9, [r0, #20]
+ ldr r10, [sp, #12] @ 4-byte Reload
+ adcs r12, r6, r7
+ ldr r7, [r1, #60]
+ ldr r6, [r2, #60]
+ ldr r1, [r1, #64]
+ ldr r2, [r2, #64]
+ adcs r6, r6, r7
+ adcs r2, r2, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ str r2, [sp, #36] @ 4-byte Spill
+ str r1, [r0, #24]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r10, [r0, #28]
+ str r2, [r0, #64]
+ mov r2, #0
+ str r1, [r0, #32]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r1, [r0, #36]
+ ldr r1, [sp, #52] @ 4-byte Reload
+ str r1, [r0, #40]
+ ldr r1, [sp, #48] @ 4-byte Reload
+ str r1, [r0, #44]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ str r11, [r0, #48]
+ mov r11, r12
+ str r1, [r0, #52]
+ adc r1, r2, #0
+ str r12, [r0, #56]
+ str r6, [r0, #60]
+ mov r12, r6
+ str r1, [sp, #16] @ 4-byte Spill
+ ldm r3, {r6, r7}
+ ldr r1, [r3, #8]
+ ldr r2, [r3, #12]
+ subs r6, r8, r6
+ sbcs r7, lr, r7
+ str r6, [sp] @ 4-byte Spill
+ sbcs r1, r4, r1
+ str r7, [sp, #32] @ 4-byte Spill
+ str r1, [sp, #28] @ 4-byte Spill
+ sbcs r1, r5, r2
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r1, [sp, #24] @ 4-byte Spill
+ ldr r1, [r3, #16]
+ sbcs r1, r2, r1
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #4] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ sbcs r9, r9, r1
+ ldr r1, [r3, #24]
+ sbcs r1, r2, r1
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r1, [sp, #60] @ 4-byte Spill
+ ldr r1, [r3, #28]
+ sbcs r10, r10, r1
+ ldr r1, [r3, #32]
+ sbcs r1, r2, r1
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #56] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ sbcs r1, r2, r1
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ sbcs lr, r2, r1
+ ldr r1, [r3, #44]
+ ldr r2, [sp, #48] @ 4-byte Reload
+ sbcs r8, r2, r1
+ ldr r1, [r3, #48]
+ ldr r2, [sp, #8] @ 4-byte Reload
+ sbcs r4, r2, r1
+ ldr r1, [r3, #52]
+ ldr r2, [sp, #44] @ 4-byte Reload
+ sbcs r5, r2, r1
+ ldr r1, [r3, #56]
+ ldr r2, [sp, #36] @ 4-byte Reload
+ sbcs r7, r11, r1
+ ldr r1, [r3, #60]
+ sbcs r6, r12, r1
+ ldr r1, [r3, #64]
+ sbcs r1, r2, r1
+ ldr r2, [sp, #16] @ 4-byte Reload
+ sbc r2, r2, #0
+ tst r2, #1
+ bne .LBB264_2
+@ BB#1: @ %nocarry
+ ldr r2, [sp] @ 4-byte Reload
+ str r2, [r0]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r2, [r0, #4]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ str r2, [r0, #8]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ str r2, [r0, #12]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r2, [r0, #16]
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r9, [r0, #20]
+ str r2, [r0, #24]
+ str r10, [r0, #28]
+ str r1, [r0, #64]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r1, [r0, #36]
+ add r1, r0, #48
+ str lr, [r0, #40]
+ str r8, [r0, #44]
+ stm r1, {r4, r5, r7}
+ str r6, [r0, #60]
+.LBB264_2: @ %carry
+ add sp, sp, #64
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end264:
+ .size mcl_fp_add17L, .Lfunc_end264-mcl_fp_add17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_addNF17L
+ .align 2
+ .type mcl_fp_addNF17L,%function
+mcl_fp_addNF17L: @ @mcl_fp_addNF17L
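+@ Modular addition of 17-limb operands (r1, r2) with the modulus at r3;
+@ the final reduction is selected with conditional moves instead of a branch.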
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #96
+ sub sp, sp, #96
+ ldr r9, [r1]
+ ldmib r1, {r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r7}
+ adds r11, r4, r9
+ ldr r4, [r1, #24]
+ adcs r10, r5, r8
+ ldr r5, [r1, #20]
+ str r11, [sp, #8] @ 4-byte Spill
+ adcs r8, r6, lr
+ ldr r6, [r1, #16]
+ str r10, [sp, #16] @ 4-byte Spill
+ adcs r9, r7, r12
+ ldr r7, [r2, #16]
+ str r8, [sp, #20] @ 4-byte Spill
+ str r9, [sp, #24] @ 4-byte Spill
+ adcs r7, r7, r6
+ ldr r6, [r2, #28]
+ str r7, [sp, #48] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ adcs lr, r7, r5
+ ldr r7, [r2, #24]
+ str lr, [sp, #4] @ 4-byte Spill
+ adcs r7, r7, r4
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r1, #28]
+ adcs r7, r6, r7
+ ldr r6, [r2, #32]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r1, #32]
+ adcs r7, r6, r7
+ ldr r6, [r2, #36]
+ str r7, [sp, #52] @ 4-byte Spill
+ ldr r7, [r1, #36]
+ adcs r7, r6, r7
+ ldr r6, [r2, #40]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r1, #40]
+ adcs r7, r6, r7
+ ldr r6, [r2, #44]
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r1, #44]
+ adcs r7, r6, r7
+ ldr r6, [r2, #48]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r1, #48]
+ adcs r7, r6, r7
+ ldr r6, [r2, #52]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r1, #52]
+ adcs r7, r6, r7
+ ldr r6, [r2, #56]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r1, #56]
+ adcs r7, r6, r7
+ ldr r6, [r2, #60]
+ ldr r2, [r2, #64]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r1, #60]
+ ldr r1, [r1, #64]
+ adcs r7, r6, r7
+ adc r1, r2, r1
+ str r7, [sp, #92] @ 4-byte Spill
+ str r1, [sp, #88] @ 4-byte Spill
+ ldm r3, {r1, r7}
+ ldr r6, [r3, #8]
+ ldr r5, [r3, #12]
+ ldr r2, [sp, #48] @ 4-byte Reload
+ subs r12, r11, r1
+ ldr r1, [r3, #64]
+ ldr r11, [r3, #36]
+ sbcs r4, r10, r7
+ ldr r10, [r3, #32]
+ ldr r7, [r3, #24]
+ sbcs r6, r8, r6
+ sbcs r9, r9, r5
+ ldr r5, [r3, #28]
+ str r1, [sp] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [r3, #56]
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [r3, #60]
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [r3, #20]
+ ldr r3, [r3, #16]
+ sbcs r2, r2, r3
+ sbcs r3, lr, r1
+ ldr r1, [sp, #60] @ 4-byte Reload
+ sbcs lr, r1, r7
+ ldr r1, [sp, #56] @ 4-byte Reload
+ ldr r7, [sp, #12] @ 4-byte Reload
+ sbcs r5, r1, r5
+ ldr r1, [sp, #52] @ 4-byte Reload
+ sbcs r8, r1, r10
+ ldr r1, [sp, #72] @ 4-byte Reload
+ sbcs r11, r1, r11
+ ldr r1, [sp, #68] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r1, [sp, #12] @ 4-byte Spill
+ ldr r1, [sp, #64] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r1, [sp, #28] @ 4-byte Spill
+ ldr r1, [sp, #84] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #36] @ 4-byte Reload
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #40] @ 4-byte Reload
+ str r1, [sp, #36] @ 4-byte Spill
+ ldr r1, [sp, #76] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r1, [sp, #40] @ 4-byte Spill
+ ldr r1, [sp, #92] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp] @ 4-byte Reload
+ str r1, [sp, #44] @ 4-byte Spill
+ ldr r1, [sp, #88] @ 4-byte Reload
+ sbc r10, r1, r7
+ ldr r7, [sp, #8] @ 4-byte Reload
+ asr r1, r10, #31
+ cmp r1, #0
+ movlt r12, r7
+ ldr r7, [sp, #16] @ 4-byte Reload
+ str r12, [r0]
+ movlt r4, r7
+ str r4, [r0, #4]
+ ldr r4, [sp, #20] @ 4-byte Reload
+ movlt r6, r4
+ cmp r1, #0
+ str r6, [r0, #8]
+ ldr r6, [sp, #24] @ 4-byte Reload
+ movlt r9, r6
+ ldr r6, [sp, #48] @ 4-byte Reload
+ str r9, [r0, #12]
+ movlt r2, r6
+ str r2, [r0, #16]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #60] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #20]
+ ldr r3, [sp, #12] @ 4-byte Reload
+ movlt lr, r2
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str lr, [r0, #24]
+ movlt r5, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str r5, [r0, #28]
+ movlt r8, r2
+ ldr r2, [sp, #72] @ 4-byte Reload
+ cmp r1, #0
+ str r8, [r0, #32]
+ movlt r11, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r11, [r0, #36]
+ movlt r3, r2
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r3, [r0, #40]
+ ldr r3, [sp, #28] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #84] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #44]
+ ldr r3, [sp, #32] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r3, [r0, #48]
+ ldr r3, [sp, #36] @ 4-byte Reload
+ movlt r3, r2
+ ldr r2, [sp, #76] @ 4-byte Reload
+ str r3, [r0, #52]
+ ldr r3, [sp, #40] @ 4-byte Reload
+ movlt r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #92] @ 4-byte Reload
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r3, [r0, #56]
+ movlt r2, r1
+ ldr r1, [sp, #88] @ 4-byte Reload
+ str r2, [r0, #60]
+ movlt r10, r1
+ str r10, [r0, #64]
+ add sp, sp, #96
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end265:
+ .size mcl_fp_addNF17L, .Lfunc_end265-mcl_fp_addNF17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_sub17L
+ .align 2
+ .type mcl_fp_sub17L,%function
+mcl_fp_sub17L: @ @mcl_fp_sub17L
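+@ Modular subtraction of 17-limb operands: r0 = r1 - r2, with the modulus at r3
+@ added back when the subtraction borrows.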
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #92
+ sub sp, sp, #92
+ ldm r2, {r8, r9, lr}
+ ldr r12, [r2, #12]
+ ldm r1, {r4, r5, r6, r7}
+ subs r4, r4, r8
+ sbcs r5, r5, r9
+ str r4, [sp, #68] @ 4-byte Spill
+ ldr r4, [r2, #24]
+ sbcs r6, r6, lr
+ str r5, [sp, #88] @ 4-byte Spill
+ ldr r5, [r2, #20]
+ sbcs r7, r7, r12
+ str r6, [sp, #84] @ 4-byte Spill
+ ldr r6, [r2, #16]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r1, #16]
+ sbcs r7, r7, r6
+ ldr r6, [r1, #28]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r1, #20]
+ sbcs r7, r7, r5
+ ldr r5, [r1, #44]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r1, #24]
+ sbcs r11, r7, r4
+ ldr r7, [r2, #28]
+ ldr r4, [r2, #52]
+ sbcs r10, r6, r7
+ ldr r7, [r2, #32]
+ ldr r6, [r1, #32]
+ str r10, [sp, #60] @ 4-byte Spill
+ sbcs r9, r6, r7
+ ldr r7, [r2, #36]
+ ldr r6, [r1, #36]
+ str r9, [sp, #56] @ 4-byte Spill
+ sbcs r7, r6, r7
+ ldr r6, [r1, #40]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ sbcs r8, r6, r7
+ ldr r7, [r2, #44]
+ str r8, [sp, #52] @ 4-byte Spill
+ sbcs lr, r5, r7
+ ldr r7, [r2, #48]
+ ldr r5, [r1, #48]
+ str lr, [sp, #48] @ 4-byte Spill
+ sbcs r6, r5, r7
+ ldr r5, [r1, #52]
+ sbcs r7, r5, r4
+ ldr r4, [r2, #56]
+ ldr r5, [r1, #56]
+ str r7, [sp, #44] @ 4-byte Spill
+ sbcs r12, r5, r4
+ ldr r4, [r2, #60]
+ ldr r5, [r1, #60]
+ ldr r2, [r2, #64]
+ ldr r1, [r1, #64]
+ str r12, [sp, #40] @ 4-byte Spill
+ sbcs r4, r5, r4
+ ldr r5, [sp, #64] @ 4-byte Reload
+ sbcs r1, r1, r2
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r2, [r0]
+ ldr r2, [sp, #88] @ 4-byte Reload
+ str r2, [r0, #4]
+ ldr r2, [sp, #84] @ 4-byte Reload
+ str r2, [r0, #8]
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r2, [r0, #12]
+ ldr r2, [sp, #76] @ 4-byte Reload
+ str r2, [r0, #16]
+ ldr r2, [sp, #72] @ 4-byte Reload
+ str r2, [r0, #20]
+ add r2, r0, #36
+ str r11, [r0, #24]
+ str r10, [r0, #28]
+ str r1, [r0, #64]
+ str r9, [r0, #32]
+ stm r2, {r5, r8, lr}
+ add r2, r0, #48
+ stm r2, {r6, r7, r12}
+ mov r2, #0
+ str r4, [r0, #60]
+ sbc r2, r2, #0
+ tst r2, #1
+ beq .LBB266_2
+@ BB#1: @ %carry
+ ldr r2, [r3, #64]
+ mov r9, r4
+ str r2, [sp, #36] @ 4-byte Spill
+ ldm r3, {r4, r12}
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r6, [sp, #28] @ 4-byte Spill
+ ldr r7, [r3, #8]
+ str r1, [sp, #32] @ 4-byte Spill
+ ldr r1, [r3, #12]
+ ldr lr, [r3, #20]
+ adds r8, r4, r2
+ ldr r2, [r3, #32]
+ str r8, [r0]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r3, #36]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldr r2, [r3, #40]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r3, #44]
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r3, #48]
+ str r2, [sp, #16] @ 4-byte Spill
+ ldr r2, [r3, #52]
+ str r2, [sp, #20] @ 4-byte Spill
+ ldr r2, [r3, #56]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [r3, #60]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [sp, #88] @ 4-byte Reload
+ adcs r6, r12, r2
+ ldr r2, [sp, #84] @ 4-byte Reload
+ adcs r7, r7, r2
+ ldr r2, [sp, #80] @ 4-byte Reload
+ adcs r4, r1, r2
+ ldr r2, [r3, #28]
+ ldr r1, [r3, #24]
+ ldr r3, [r3, #16]
+ stmib r0, {r6, r7}
+ ldr r7, [sp, #76] @ 4-byte Reload
+ str r4, [r0, #12]
+ ldr r6, [sp, #16] @ 4-byte Reload
+ ldr r4, [sp, #24] @ 4-byte Reload
+ adcs r3, r3, r7
+ ldr r7, [sp, #72] @ 4-byte Reload
+ str r3, [r0, #16]
+ ldr r3, [sp, #60] @ 4-byte Reload
+ adcs r7, lr, r7
+ adcs r1, r1, r11
+ str r7, [r0, #20]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ adcs r3, r2, r3
+ str r1, [r0, #24]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ ldr r2, [sp] @ 4-byte Reload
+ str r3, [r0, #28]
+ ldr r3, [sp, #8] @ 4-byte Reload
+ adcs r12, r2, r1
+ ldr r1, [sp, #4] @ 4-byte Reload
+ str r12, [r0, #32]
+ add r12, r0, #36
+ adcs r2, r1, r5
+ ldr r1, [sp, #52] @ 4-byte Reload
+ ldr r5, [sp, #20] @ 4-byte Reload
+ adcs r3, r3, r1
+ ldr r1, [sp, #48] @ 4-byte Reload
+ adcs r7, r7, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ adcs r6, r6, r1
+ ldr r1, [sp, #44] @ 4-byte Reload
+ adcs r5, r5, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ adcs r4, r4, r1
+ ldr r1, [sp, #68] @ 4-byte Reload
+ stm r12, {r2, r3, r7}
+ str r6, [r0, #48]
+ str r5, [r0, #52]
+ str r4, [r0, #56]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ adcs r1, r1, r9
+ str r1, [r0, #60]
+ ldr r1, [sp, #36] @ 4-byte Reload
+ adc r1, r1, r2
+ str r1, [r0, #64]
+.LBB266_2: @ %nocarry
+ add sp, sp, #92
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end266:
+ .size mcl_fp_sub17L, .Lfunc_end266-mcl_fp_sub17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fp_subNF17L
+ .align 2
+ .type mcl_fp_subNF17L,%function
+mcl_fp_subNF17L: @ @mcl_fp_subNF17L
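+@ Modular subtraction of 17-limb operands; the modulus at r3 is added back via
+@ conditional moves when the difference is negative.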
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #104
+ sub sp, sp, #104
+ mov r12, r0
+ ldr r0, [r2, #64]
+ ldr r11, [r2]
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r1, #64]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r2, #32]
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [r2, #36]
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [r2, #40]
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [r2, #44]
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [r2, #48]
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [r2, #52]
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [r2, #56]
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [r2, #60]
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [r1, #60]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r1, #56]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r1, #52]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r1, #48]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldmib r2, {r5, r6, r7, r8, r9, r10}
+ ldr r0, [r2, #28]
+ ldr r2, [r1]
+ str r0, [sp, #64] @ 4-byte Spill
+ ldmib r1, {r0, lr}
+ ldr r4, [r1, #12]
+ subs r2, r2, r11
+ add r11, r3, #8
+ str r2, [sp, #12] @ 4-byte Spill
+ ldr r2, [r1, #44]
+ sbcs r0, r0, r5
+ ldr r5, [r1, #40]
+ str r0, [sp, #8] @ 4-byte Spill
+ sbcs r0, lr, r6
+ ldr r6, [r1, #36]
+ str r0, [sp, #48] @ 4-byte Spill
+ sbcs r0, r4, r7
+ ldr r7, [r1, #16]
+ str r0, [sp, #52] @ 4-byte Spill
+ sbcs r0, r7, r8
+ ldr r7, [r1, #20]
+ str r0, [sp, #56] @ 4-byte Spill
+ sbcs r0, r7, r9
+ ldr r7, [r1, #24]
+ str r0, [sp, #60] @ 4-byte Spill
+ sbcs r0, r7, r10
+ ldr r7, [r1, #32]
+ ldr r1, [r1, #28]
+ str r0, [sp, #68] @ 4-byte Spill
+ ldr r0, [sp, #64] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #64] @ 4-byte Spill
+ ldr r0, [sp, #72] @ 4-byte Reload
+ sbcs r0, r7, r0
+ str r0, [sp, #72] @ 4-byte Spill
+ ldr r0, [sp, #76] @ 4-byte Reload
+ sbcs r0, r6, r0
+ str r0, [sp, #76] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ sbcs r0, r5, r0
+ str r0, [sp, #80] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ sbcs r0, r2, r0
+ str r0, [sp, #84] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #88] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #92] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #96] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ sbcs r0, r1, r0
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #100] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ sbc r0, r1, r0
+ str r0, [sp, #44] @ 4-byte Spill
+ ldr r0, [r3, #64]
+ str r0, [sp, #40] @ 4-byte Spill
+ ldr r0, [r3, #36]
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [r3, #40]
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [r3, #44]
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [r3, #48]
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [r3, #52]
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [r3, #56]
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [r3, #60]
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [r3, #32]
+ str r0, [sp] @ 4-byte Spill
+ ldm r3, {r2, r7}
+ ldm r11, {r1, r4, r5, r6, r11}
+ ldr r8, [sp, #12] @ 4-byte Reload
+ ldr r10, [sp, #8] @ 4-byte Reload
+ ldr r0, [r3, #28]
+ adds r2, r8, r2
+ adcs r3, r10, r7
+ ldr r7, [sp, #48] @ 4-byte Reload
+ adcs lr, r7, r1
+ ldr r1, [sp, #52] @ 4-byte Reload
+ adcs r4, r1, r4
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r5, r1, r5
+ ldr r1, [sp, #60] @ 4-byte Reload
+ adcs r6, r1, r6
+ ldr r1, [sp, #68] @ 4-byte Reload
+ adcs r7, r1, r11
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r9, r1, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ ldr r1, [sp] @ 4-byte Reload
+ adcs r11, r0, r1
+ ldr r0, [sp, #76] @ 4-byte Reload
+ ldr r1, [sp, #4] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #16] @ 4-byte Reload
+ str r0, [sp, #4] @ 4-byte Spill
+ ldr r0, [sp, #80] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #20] @ 4-byte Reload
+ str r0, [sp, #16] @ 4-byte Spill
+ ldr r0, [sp, #84] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #24] @ 4-byte Reload
+ str r0, [sp, #20] @ 4-byte Spill
+ ldr r0, [sp, #88] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #28] @ 4-byte Reload
+ str r0, [sp, #24] @ 4-byte Spill
+ ldr r0, [sp, #92] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #32] @ 4-byte Reload
+ str r0, [sp, #28] @ 4-byte Spill
+ ldr r0, [sp, #96] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r0, [sp, #32] @ 4-byte Spill
+ ldr r0, [sp, #100] @ 4-byte Reload
+ adcs r0, r0, r1
+ ldr r1, [sp, #40] @ 4-byte Reload
+ str r0, [sp, #36] @ 4-byte Spill
+ ldr r0, [sp, #44] @ 4-byte Reload
+ adc r1, r0, r1
+ str r1, [sp, #40] @ 4-byte Spill
+ asr r1, r0, #31
+ ldr r0, [sp, #48] @ 4-byte Reload
+ cmp r1, #0
+ movge r2, r8
+ movge r3, r10
+ str r2, [r12]
+ ldr r2, [sp, #4] @ 4-byte Reload
+ str r3, [r12, #4]
+ movge lr, r0
+ ldr r0, [sp, #52] @ 4-byte Reload
+ cmp r1, #0
+ str lr, [r12, #8]
+ movge r4, r0
+ ldr r0, [sp, #56] @ 4-byte Reload
+ str r4, [r12, #12]
+ movge r5, r0
+ ldr r0, [sp, #60] @ 4-byte Reload
+ str r5, [r12, #16]
+ movge r6, r0
+ ldr r0, [sp, #68] @ 4-byte Reload
+ cmp r1, #0
+ str r6, [r12, #20]
+ movge r7, r0
+ ldr r0, [sp, #64] @ 4-byte Reload
+ str r7, [r12, #24]
+ movge r9, r0
+ ldr r0, [sp, #72] @ 4-byte Reload
+ str r9, [r12, #28]
+ movge r11, r0
+ ldr r0, [sp, #76] @ 4-byte Reload
+ cmp r1, #0
+ str r11, [r12, #32]
+ movge r2, r0
+ ldr r0, [sp, #80] @ 4-byte Reload
+ str r2, [r12, #36]
+ ldr r2, [sp, #16] @ 4-byte Reload
+ movge r2, r0
+ ldr r0, [sp, #84] @ 4-byte Reload
+ str r2, [r12, #40]
+ ldr r2, [sp, #20] @ 4-byte Reload
+ movge r2, r0
+ ldr r0, [sp, #88] @ 4-byte Reload
+ cmp r1, #0
+ str r2, [r12, #44]
+ ldr r2, [sp, #24] @ 4-byte Reload
+ movge r2, r0
+ ldr r0, [sp, #92] @ 4-byte Reload
+ str r2, [r12, #48]
+ ldr r2, [sp, #28] @ 4-byte Reload
+ movge r2, r0
+ ldr r0, [sp, #96] @ 4-byte Reload
+ str r2, [r12, #52]
+ ldr r2, [sp, #32] @ 4-byte Reload
+ movge r2, r0
+ ldr r0, [sp, #100] @ 4-byte Reload
+ cmp r1, #0
+ ldr r1, [sp, #36] @ 4-byte Reload
+ str r2, [r12, #56]
+ movge r1, r0
+ ldr r0, [sp, #40] @ 4-byte Reload
+ str r1, [r12, #60]
+ ldr r1, [sp, #44] @ 4-byte Reload
+ movge r0, r1
+ str r0, [r12, #64]
+ add sp, sp, #104
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end267:
+ .size mcl_fp_subNF17L, .Lfunc_end267-mcl_fp_subNF17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_add17L
+ .align 2
+ .type mcl_fpDbl_add17L,%function
+mcl_fpDbl_add17L: @ @mcl_fpDbl_add17L
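+@ Double-width (34-limb) addition: the low 17 limbs are stored as-is and the
+@ high 17 limbs are conditionally reduced by the modulus at r3.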
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #216
+ sub sp, sp, #216
+ ldm r1, {r7, r8, lr}
+ ldr r12, [r1, #12]
+ ldm r2, {r4, r5, r6, r9}
+ add r10, r1, #32
+ adds r4, r4, r7
+ str r4, [sp, #104] @ 4-byte Spill
+ ldr r4, [r2, #128]
+ str r4, [sp, #208] @ 4-byte Spill
+ ldr r4, [r2, #132]
+ str r4, [sp, #212] @ 4-byte Spill
+ adcs r4, r5, r8
+ adcs r7, r6, lr
+ str r4, [sp, #100] @ 4-byte Spill
+ add lr, r1, #16
+ str r7, [sp, #96] @ 4-byte Spill
+ ldr r7, [r2, #96]
+ str r7, [sp, #176] @ 4-byte Spill
+ ldr r7, [r2, #100]
+ str r7, [sp, #180] @ 4-byte Spill
+ ldr r7, [r2, #104]
+ str r7, [sp, #184] @ 4-byte Spill
+ ldr r7, [r2, #108]
+ str r7, [sp, #188] @ 4-byte Spill
+ ldr r7, [r2, #112]
+ str r7, [sp, #192] @ 4-byte Spill
+ ldr r7, [r2, #116]
+ str r7, [sp, #196] @ 4-byte Spill
+ ldr r7, [r2, #120]
+ str r7, [sp, #200] @ 4-byte Spill
+ ldr r7, [r2, #124]
+ str r7, [sp, #204] @ 4-byte Spill
+ adcs r7, r9, r12
+ str r7, [sp, #68] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #152] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #156] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #160] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #168] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #164] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #172] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #56] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #60] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #64] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #72] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #52]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #56]
+ str r7, [sp, #88] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #92] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r2, #16]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #128]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #136] @ 4-byte Spill
+ ldr r2, [r1, #132]
+ str r2, [sp, #140] @ 4-byte Spill
+ ldr r2, [r1, #96]
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #104]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #108]
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [r1, #112]
+ str r2, [sp, #120] @ 4-byte Spill
+ ldr r2, [r1, #116]
+ str r2, [sp, #124] @ 4-byte Spill
+ ldr r2, [r1, #120]
+ str r2, [sp, #128] @ 4-byte Spill
+ ldr r2, [r1, #124]
+ str r2, [sp, #132] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r2, [sp, #84] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #24] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #28] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #32] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #36] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #40] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r8, r9, r10}
+ ldr r2, [r1, #56]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #104] @ 4-byte Reload
+ ldr r7, [sp, #100] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #96] @ 4-byte Reload
+ str r7, [r0, #8]
+ ldr r7, [sp, #8] @ 4-byte Reload
+ adcs r1, r7, r1
+ ldr r7, [sp, #68] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ adcs r2, r7, r2
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ adcs r1, r1, r12
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #56] @ 4-byte Reload
+ adcs r2, r2, lr
+ str r2, [r0, #28]
+ adcs r1, r1, r4
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #64] @ 4-byte Reload
+ adcs r2, r2, r5
+ str r2, [r0, #36]
+ adcs r1, r1, r6
+ ldr r2, [sp, #72] @ 4-byte Reload
+ str r1, [r0, #40]
+ ldr r1, [sp, #76] @ 4-byte Reload
+ adcs r2, r2, r8
+ str r2, [r0, #44]
+ adcs r1, r1, r9
+ ldr r2, [sp, #80] @ 4-byte Reload
+ str r1, [r0, #48]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ adcs r2, r2, r10
+ adcs r1, r1, r7
+ str r2, [r0, #52]
+ ldr r2, [sp, #92] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #56]
+ ldr r1, [sp, #144] @ 4-byte Reload
+ adcs r2, r2, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r2, [r0, #60]
+ ldr r2, [sp, #148] @ 4-byte Reload
+ adcs r1, r1, r7
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r1, [r0, #64]
+ ldr r1, [sp, #152] @ 4-byte Reload
+ adcs r12, r2, r7
+ ldr r2, [sp, #32] @ 4-byte Reload
+ str r12, [sp, #96] @ 4-byte Spill
+ adcs r9, r1, r2
+ ldr r1, [sp, #156] @ 4-byte Reload
+ ldr r2, [sp, #36] @ 4-byte Reload
+ str r9, [sp, #100] @ 4-byte Spill
+ adcs r8, r1, r2
+ ldr r1, [sp, #160] @ 4-byte Reload
+ ldr r2, [sp, #40] @ 4-byte Reload
+ str r8, [sp, #104] @ 4-byte Spill
+ adcs r4, r1, r2
+ ldr r1, [sp, #168] @ 4-byte Reload
+ ldr r2, [sp, #44] @ 4-byte Reload
+ str r4, [sp, #144] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #48] @ 4-byte Reload
+ str r1, [sp, #168] @ 4-byte Spill
+ ldr r1, [sp, #164] @ 4-byte Reload
+ adcs lr, r1, r2
+ ldr r1, [sp, #172] @ 4-byte Reload
+ ldr r2, [sp, #52] @ 4-byte Reload
+ str lr, [sp, #92] @ 4-byte Spill
+ adcs r1, r1, r2
+ ldr r2, [sp, #108] @ 4-byte Reload
+ str r1, [sp, #172] @ 4-byte Spill
+ ldr r1, [sp, #176] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #84] @ 4-byte Reload
+ str r1, [sp, #176] @ 4-byte Spill
+ ldr r1, [sp, #180] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #112] @ 4-byte Reload
+ str r1, [sp, #180] @ 4-byte Spill
+ ldr r1, [sp, #184] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #116] @ 4-byte Reload
+ str r1, [sp, #184] @ 4-byte Spill
+ ldr r1, [sp, #188] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #120] @ 4-byte Reload
+ str r1, [sp, #188] @ 4-byte Spill
+ ldr r1, [sp, #192] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #124] @ 4-byte Reload
+ str r1, [sp, #192] @ 4-byte Spill
+ ldr r1, [sp, #196] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #128] @ 4-byte Reload
+ str r1, [sp, #196] @ 4-byte Spill
+ ldr r1, [sp, #200] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #132] @ 4-byte Reload
+ str r1, [sp, #200] @ 4-byte Spill
+ ldr r1, [sp, #204] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #136] @ 4-byte Reload
+ str r1, [sp, #204] @ 4-byte Spill
+ ldr r1, [sp, #208] @ 4-byte Reload
+ adcs r1, r1, r2
+ ldr r2, [sp, #140] @ 4-byte Reload
+ str r1, [sp, #208] @ 4-byte Spill
+ ldr r1, [sp, #212] @ 4-byte Reload
+ adcs r1, r1, r2
+ str r1, [sp, #212] @ 4-byte Spill
+ mov r1, #0
+ adc r1, r1, #0
+ str r1, [sp, #140] @ 4-byte Spill
+ ldm r3, {r2, r7}
+ ldr r1, [r3, #64]
+ ldr r6, [r3, #8]
+ ldr r5, [r3, #12]
+ ldr r10, [r3, #36]
+ ldr r11, [r3, #40]
+ str r1, [sp, #164] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ subs r12, r12, r2
+ sbcs r7, r9, r7
+ sbcs r6, r8, r6
+ add r8, r3, #20
+ sbcs r9, r4, r5
+ str r1, [sp, #136] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #148] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #152] @ 4-byte Spill
+ ldr r1, [r3, #56]
+ str r1, [sp, #156] @ 4-byte Spill
+ ldr r1, [r3, #60]
+ str r1, [sp, #160] @ 4-byte Spill
+ ldm r8, {r1, r4, r5, r8}
+ ldr r3, [r3, #16]
+ ldr r2, [sp, #168] @ 4-byte Reload
+ sbcs r2, r2, r3
+ sbcs r3, lr, r1
+ ldr r1, [sp, #172] @ 4-byte Reload
+ sbcs lr, r1, r4
+ ldr r1, [sp, #176] @ 4-byte Reload
+ sbcs r4, r1, r5
+ ldr r1, [sp, #180] @ 4-byte Reload
+ ldr r5, [sp, #136] @ 4-byte Reload
+ sbcs r8, r1, r8
+ ldr r1, [sp, #184] @ 4-byte Reload
+ sbcs r10, r1, r10
+ ldr r1, [sp, #188] @ 4-byte Reload
+ sbcs r11, r1, r11
+ ldr r1, [sp, #192] @ 4-byte Reload
+ sbcs r1, r1, r5
+ ldr r5, [sp, #148] @ 4-byte Reload
+ str r1, [sp, #136] @ 4-byte Spill
+ ldr r1, [sp, #196] @ 4-byte Reload
+ sbcs r1, r1, r5
+ ldr r5, [sp, #152] @ 4-byte Reload
+ str r1, [sp, #148] @ 4-byte Spill
+ ldr r1, [sp, #200] @ 4-byte Reload
+ sbcs r1, r1, r5
+ ldr r5, [sp, #156] @ 4-byte Reload
+ str r1, [sp, #152] @ 4-byte Spill
+ ldr r1, [sp, #204] @ 4-byte Reload
+ sbcs r1, r1, r5
+ ldr r5, [sp, #160] @ 4-byte Reload
+ str r1, [sp, #156] @ 4-byte Spill
+ ldr r1, [sp, #208] @ 4-byte Reload
+ sbcs r1, r1, r5
+ ldr r5, [sp, #164] @ 4-byte Reload
+ str r1, [sp, #160] @ 4-byte Spill
+ ldr r1, [sp, #212] @ 4-byte Reload
+ sbcs r1, r1, r5
+ ldr r5, [sp, #96] @ 4-byte Reload
+ str r1, [sp, #164] @ 4-byte Spill
+ ldr r1, [sp, #140] @ 4-byte Reload
+ sbc r1, r1, #0
+ ands r1, r1, #1
+ movne r12, r5
+ ldr r5, [sp, #100] @ 4-byte Reload
+ str r12, [r0, #68]
+ movne r7, r5
+ str r7, [r0, #72]
+ ldr r7, [sp, #104] @ 4-byte Reload
+ movne r6, r7
+ ldr r7, [sp, #144] @ 4-byte Reload
+ cmp r1, #0
+ str r6, [r0, #76]
+ movne r9, r7
+ ldr r7, [sp, #168] @ 4-byte Reload
+ str r9, [r0, #80]
+ movne r2, r7
+ str r2, [r0, #84]
+ ldr r2, [sp, #92] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #172] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #88]
+ ldr r3, [sp, #136] @ 4-byte Reload
+ movne lr, r2
+ ldr r2, [sp, #176] @ 4-byte Reload
+ str lr, [r0, #92]
+ movne r4, r2
+ ldr r2, [sp, #180] @ 4-byte Reload
+ str r4, [r0, #96]
+ movne r8, r2
+ ldr r2, [sp, #184] @ 4-byte Reload
+ cmp r1, #0
+ str r8, [r0, #100]
+ movne r10, r2
+ ldr r2, [sp, #188] @ 4-byte Reload
+ str r10, [r0, #104]
+ movne r11, r2
+ ldr r2, [sp, #192] @ 4-byte Reload
+ str r11, [r0, #108]
+ movne r3, r2
+ ldr r2, [sp, #196] @ 4-byte Reload
+ cmp r1, #0
+ str r3, [r0, #112]
+ ldr r3, [sp, #148] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #200] @ 4-byte Reload
+ str r3, [r0, #116]
+ ldr r3, [sp, #152] @ 4-byte Reload
+ movne r3, r2
+ ldr r2, [sp, #204] @ 4-byte Reload
+ str r3, [r0, #120]
+ ldr r3, [sp, #156] @ 4-byte Reload
+ movne r3, r2
+ cmp r1, #0
+ ldr r1, [sp, #208] @ 4-byte Reload
+ ldr r2, [sp, #160] @ 4-byte Reload
+ str r3, [r0, #124]
+ ldr r3, [sp, #164] @ 4-byte Reload
+ movne r2, r1
+ ldr r1, [sp, #212] @ 4-byte Reload
+ str r2, [r0, #128]
+ movne r3, r1
+ str r3, [r0, #132]
+ add sp, sp, #216
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end268:
+ .size mcl_fpDbl_add17L, .Lfunc_end268-mcl_fpDbl_add17L
+ .cantunwind
+ .fnend
+
+ .globl mcl_fpDbl_sub17L
+ .align 2
+ .type mcl_fpDbl_sub17L,%function
+mcl_fpDbl_sub17L: @ @mcl_fpDbl_sub17L
+ .fnstart
+@ BB#0:
+ .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ push {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ .pad #216
+ sub sp, sp, #216
+ ldr r7, [r2, #128]
+ add r10, r1, #32
+ str r7, [sp, #208] @ 4-byte Spill
+ ldr r7, [r2, #132]
+ str r7, [sp, #212] @ 4-byte Spill
+ ldr r7, [r2, #96]
+ str r7, [sp, #188] @ 4-byte Spill
+ ldr r7, [r2, #104]
+ str r7, [sp, #164] @ 4-byte Spill
+ ldr r7, [r2, #108]
+ str r7, [sp, #168] @ 4-byte Spill
+ ldr r7, [r2, #112]
+ str r7, [sp, #192] @ 4-byte Spill
+ ldr r7, [r2, #116]
+ str r7, [sp, #196] @ 4-byte Spill
+ ldr r7, [r2, #120]
+ str r7, [sp, #200] @ 4-byte Spill
+ ldr r7, [r2, #124]
+ str r7, [sp, #204] @ 4-byte Spill
+ ldr r7, [r2, #100]
+ str r7, [sp, #156] @ 4-byte Spill
+ ldr r7, [r2, #64]
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [r2, #68]
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [r2, #72]
+ str r7, [sp, #152] @ 4-byte Spill
+ ldr r7, [r2, #76]
+ str r7, [sp, #160] @ 4-byte Spill
+ ldr r7, [r2, #80]
+ str r7, [sp, #172] @ 4-byte Spill
+ ldr r7, [r2, #84]
+ str r7, [sp, #176] @ 4-byte Spill
+ ldr r7, [r2, #88]
+ str r7, [sp, #180] @ 4-byte Spill
+ ldr r7, [r2, #92]
+ str r7, [sp, #184] @ 4-byte Spill
+ ldr r7, [r2, #60]
+ str r7, [sp, #140] @ 4-byte Spill
+ ldm r2, {r6, r8, r12, lr}
+ ldm r1, {r4, r5, r7, r9}
+ subs r4, r4, r6
+ str r4, [sp, #36] @ 4-byte Spill
+ ldr r4, [r2, #56]
+ str r4, [sp, #128] @ 4-byte Spill
+ sbcs r4, r5, r8
+ sbcs r7, r7, r12
+ str r4, [sp, #32] @ 4-byte Spill
+ ldr r4, [r2, #52]
+ str r7, [sp, #28] @ 4-byte Spill
+ ldr r7, [r2, #48]
+ str r4, [sp, #96] @ 4-byte Spill
+ str r7, [sp, #88] @ 4-byte Spill
+ sbcs r7, r9, lr
+ add lr, r1, #16
+ str r7, [sp, #24] @ 4-byte Spill
+ ldr r7, [r2, #44]
+ str r7, [sp, #84] @ 4-byte Spill
+ ldr r7, [r2, #40]
+ str r7, [sp, #80] @ 4-byte Spill
+ ldr r7, [r2, #36]
+ str r7, [sp, #76] @ 4-byte Spill
+ ldr r7, [r2, #32]
+ str r7, [sp, #40] @ 4-byte Spill
+ ldr r7, [r2, #28]
+ str r7, [sp, #20] @ 4-byte Spill
+ ldr r7, [r2, #24]
+ str r7, [sp, #16] @ 4-byte Spill
+ ldr r7, [r2, #20]
+ ldr r2, [r2, #16]
+ str r2, [sp, #8] @ 4-byte Spill
+ ldr r2, [r1, #128]
+ str r7, [sp, #12] @ 4-byte Spill
+ str r2, [sp, #132] @ 4-byte Spill
+ ldr r2, [r1, #132]
+ str r2, [sp, #136] @ 4-byte Spill
+ ldr r2, [r1, #96]
+ str r2, [sp, #100] @ 4-byte Spill
+ ldr r2, [r1, #104]
+ str r2, [sp, #104] @ 4-byte Spill
+ ldr r2, [r1, #108]
+ str r2, [sp, #108] @ 4-byte Spill
+ ldr r2, [r1, #112]
+ str r2, [sp, #112] @ 4-byte Spill
+ ldr r2, [r1, #116]
+ str r2, [sp, #116] @ 4-byte Spill
+ ldr r2, [r1, #120]
+ str r2, [sp, #120] @ 4-byte Spill
+ ldr r2, [r1, #124]
+ str r2, [sp, #124] @ 4-byte Spill
+ ldr r2, [r1, #100]
+ str r2, [sp, #92] @ 4-byte Spill
+ ldr r2, [r1, #64]
+ str r2, [sp, #44] @ 4-byte Spill
+ ldr r2, [r1, #68]
+ str r2, [sp, #48] @ 4-byte Spill
+ ldr r2, [r1, #72]
+ str r2, [sp, #52] @ 4-byte Spill
+ ldr r2, [r1, #76]
+ str r2, [sp, #56] @ 4-byte Spill
+ ldr r2, [r1, #80]
+ str r2, [sp, #60] @ 4-byte Spill
+ ldr r2, [r1, #84]
+ str r2, [sp, #64] @ 4-byte Spill
+ ldr r2, [r1, #88]
+ str r2, [sp, #68] @ 4-byte Spill
+ ldr r2, [r1, #92]
+ str r2, [sp, #72] @ 4-byte Spill
+ ldm r10, {r4, r5, r6, r8, r9, r10}
+ ldr r2, [r1, #56]
+ str r2, [sp] @ 4-byte Spill
+ ldr r2, [r1, #60]
+ str r2, [sp, #4] @ 4-byte Spill
+ ldm lr, {r1, r2, r12, lr}
+ ldr r11, [sp, #36] @ 4-byte Reload
+ ldr r7, [sp, #32] @ 4-byte Reload
+ str r11, [r0]
+ str r7, [r0, #4]
+ ldr r7, [sp, #28] @ 4-byte Reload
+ str r7, [r0, #8]
+ ldr r7, [sp, #8] @ 4-byte Reload
+ sbcs r1, r1, r7
+ ldr r7, [sp, #24] @ 4-byte Reload
+ str r7, [r0, #12]
+ ldr r7, [sp, #12] @ 4-byte Reload
+ str r1, [r0, #16]
+ ldr r1, [sp, #16] @ 4-byte Reload
+ sbcs r2, r2, r7
+ ldr r7, [sp] @ 4-byte Reload
+ str r2, [r0, #20]
+ sbcs r1, r12, r1
+ ldr r2, [sp, #20] @ 4-byte Reload
+ str r1, [r0, #24]
+ ldr r1, [sp, #40] @ 4-byte Reload
+ sbcs r2, lr, r2
+ add lr, r3, #8
+ str r2, [r0, #28]
+ sbcs r1, r4, r1
+ ldr r2, [sp, #76] @ 4-byte Reload
+ str r1, [r0, #32]
+ ldr r1, [sp, #80] @ 4-byte Reload
+ sbcs r2, r5, r2
+ str r2, [r0, #36]
+ sbcs r1, r6, r1
+ ldr r2, [sp, #84] @ 4-byte Reload
+ str r1, [r0, #40]
+ ldr r1, [sp, #88] @ 4-byte Reload
+ sbcs r2, r8, r2
+ sbcs r1, r9, r1
+ str r2, [r0, #44]
+ ldr r2, [sp, #96] @ 4-byte Reload
+ add r9, r3, #20
+ str r1, [r0, #48]
+ ldr r1, [sp, #128] @ 4-byte Reload
+ sbcs r2, r10, r2
+ sbcs r1, r7, r1
+ str r2, [r0, #52]
+ ldr r2, [sp, #140] @ 4-byte Reload
+ ldr r7, [sp, #4] @ 4-byte Reload
+ str r1, [r0, #56]
+ ldr r1, [sp, #144] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #44] @ 4-byte Reload
+ str r2, [r0, #60]
+ ldr r2, [sp, #148] @ 4-byte Reload
+ sbcs r1, r7, r1
+ ldr r7, [sp, #48] @ 4-byte Reload
+ str r1, [r0, #64]
+ ldr r1, [sp, #152] @ 4-byte Reload
+ sbcs r5, r7, r2
+ ldr r2, [sp, #52] @ 4-byte Reload
+ ldr r7, [sp, #100] @ 4-byte Reload
+ sbcs r10, r2, r1
+ ldr r1, [sp, #160] @ 4-byte Reload
+ ldr r2, [sp, #56] @ 4-byte Reload
+ str r10, [sp, #96] @ 4-byte Spill
+ sbcs r1, r2, r1
+ ldr r2, [sp, #60] @ 4-byte Reload
+ str r1, [sp, #160] @ 4-byte Spill
+ ldr r1, [sp, #172] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #64] @ 4-byte Reload
+ str r1, [sp, #172] @ 4-byte Spill
+ ldr r1, [sp, #176] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #68] @ 4-byte Reload
+ str r1, [sp, #176] @ 4-byte Spill
+ ldr r1, [sp, #180] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #72] @ 4-byte Reload
+ str r1, [sp, #180] @ 4-byte Spill
+ ldr r1, [sp, #184] @ 4-byte Reload
+ sbcs r1, r2, r1
+ ldr r2, [sp, #188] @ 4-byte Reload
+ str r1, [sp, #184] @ 4-byte Spill
+ mov r1, #0
+ sbcs r2, r7, r2
+ ldr r7, [sp, #92] @ 4-byte Reload
+ str r2, [sp, #188] @ 4-byte Spill
+ ldr r2, [sp, #156] @ 4-byte Reload
+ sbcs r11, r7, r2
+ ldr r2, [sp, #164] @ 4-byte Reload
+ ldr r7, [sp, #104] @ 4-byte Reload
+ str r11, [sp, #128] @ 4-byte Spill
+ sbcs r2, r7, r2
+ ldr r7, [sp, #108] @ 4-byte Reload
+ str r2, [sp, #164] @ 4-byte Spill
+ ldr r2, [sp, #168] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #112] @ 4-byte Reload
+ str r2, [sp, #168] @ 4-byte Spill
+ ldr r2, [sp, #192] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #116] @ 4-byte Reload
+ str r2, [sp, #192] @ 4-byte Spill
+ ldr r2, [sp, #196] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #120] @ 4-byte Reload
+ str r2, [sp, #196] @ 4-byte Spill
+ ldr r2, [sp, #200] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #124] @ 4-byte Reload
+ str r2, [sp, #200] @ 4-byte Spill
+ ldr r2, [sp, #204] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #132] @ 4-byte Reload
+ str r2, [sp, #204] @ 4-byte Spill
+ ldr r2, [sp, #208] @ 4-byte Reload
+ sbcs r2, r7, r2
+ ldr r7, [sp, #136] @ 4-byte Reload
+ str r2, [sp, #208] @ 4-byte Spill
+ ldr r2, [sp, #212] @ 4-byte Reload
+ sbcs r2, r7, r2
+ sbc r1, r1, #0
+ str r2, [sp, #212] @ 4-byte Spill
+ str r1, [sp, #124] @ 4-byte Spill
+ ldr r1, [r3, #64]
+ str r1, [sp, #156] @ 4-byte Spill
+ ldr r1, [r3, #36]
+ str r1, [sp, #120] @ 4-byte Spill
+ ldr r1, [r3, #40]
+ str r1, [sp, #132] @ 4-byte Spill
+ ldr r1, [r3, #44]
+ str r1, [sp, #136] @ 4-byte Spill
+ ldr r1, [r3, #48]
+ str r1, [sp, #140] @ 4-byte Spill
+ ldr r1, [r3, #52]
+ str r1, [sp, #144] @ 4-byte Spill
+ ldr r1, [r3, #56]
+ str r1, [sp, #148] @ 4-byte Spill
+ ldr r1, [r3, #60]
+ str r1, [sp, #152] @ 4-byte Spill
+ ldr r1, [r3, #32]
+ str r1, [sp, #116] @ 4-byte Spill
+ ldm r3, {r2, r7}
+ ldm lr, {r6, r12, lr}
+ ldm r9, {r4, r8, r9}
+ ldr r3, [sp, #160] @ 4-byte Reload
+ adds r1, r5, r2
+ adcs r2, r10, r7
+ ldr r7, [sp, #164] @ 4-byte Reload
+ adcs r3, r3, r6
+ ldr r6, [sp, #172] @ 4-byte Reload
+ adcs r12, r6, r12
+ ldr r6, [sp, #176] @ 4-byte Reload
+ adcs lr, r6, lr
+ ldr r6, [sp, #180] @ 4-byte Reload
+ adcs r4, r6, r4
+ ldr r6, [sp, #184] @ 4-byte Reload
+ adcs r8, r6, r8
+ ldr r6, [sp, #188] @ 4-byte Reload
+ adcs r9, r6, r9
+ ldr r6, [sp, #116] @ 4-byte Reload
+ adcs r10, r11, r6
+ ldr r6, [sp, #120] @ 4-byte Reload
+ ldr r11, [sp, #156] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #132] @ 4-byte Reload
+ str r7, [sp, #120] @ 4-byte Spill
+ ldr r7, [sp, #168] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #136] @ 4-byte Reload
+ str r7, [sp, #132] @ 4-byte Spill
+ ldr r7, [sp, #192] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #140] @ 4-byte Reload
+ str r7, [sp, #136] @ 4-byte Spill
+ ldr r7, [sp, #196] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #144] @ 4-byte Reload
+ str r7, [sp, #140] @ 4-byte Spill
+ ldr r7, [sp, #200] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #148] @ 4-byte Reload
+ str r7, [sp, #144] @ 4-byte Spill
+ ldr r7, [sp, #204] @ 4-byte Reload
+ adcs r7, r7, r6
+ ldr r6, [sp, #152] @ 4-byte Reload
+ str r7, [sp, #148] @ 4-byte Spill
+ ldr r7, [sp, #208] @ 4-byte Reload
+ adcs r7, r7, r6
+ str r7, [sp, #152] @ 4-byte Spill
+ ldr r7, [sp, #212] @ 4-byte Reload
+ adc r7, r7, r11
+ str r7, [sp, #156] @ 4-byte Spill
+ ldr r7, [sp, #124] @ 4-byte Reload
+ ands r7, r7, #1
+ moveq r1, r5
+ str r1, [r0, #68]
+ ldr r1, [sp, #96] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #160] @ 4-byte Reload
+ str r2, [r0, #72]
+ ldr r2, [sp, #120] @ 4-byte Reload
+ moveq r3, r1
+ ldr r1, [sp, #172] @ 4-byte Reload
+ cmp r7, #0
+ str r3, [r0, #76]
+ ldr r3, [sp, #156] @ 4-byte Reload
+ moveq r12, r1
+ ldr r1, [sp, #176] @ 4-byte Reload
+ str r12, [r0, #80]
+ moveq lr, r1
+ ldr r1, [sp, #180] @ 4-byte Reload
+ str lr, [r0, #84]
+ moveq r4, r1
+ ldr r1, [sp, #184] @ 4-byte Reload
+ cmp r7, #0
+ str r4, [r0, #88]
+ moveq r8, r1
+ ldr r1, [sp, #188] @ 4-byte Reload
+ str r8, [r0, #92]
+ moveq r9, r1
+ ldr r1, [sp, #128] @ 4-byte Reload
+ str r9, [r0, #96]
+ moveq r10, r1
+ ldr r1, [sp, #164] @ 4-byte Reload
+ cmp r7, #0
+ str r10, [r0, #100]
+ moveq r2, r1
+ ldr r1, [sp, #168] @ 4-byte Reload
+ str r2, [r0, #104]
+ ldr r2, [sp, #132] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #192] @ 4-byte Reload
+ str r2, [r0, #108]
+ ldr r2, [sp, #136] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #196] @ 4-byte Reload
+ cmp r7, #0
+ str r2, [r0, #112]
+ ldr r2, [sp, #140] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #200] @ 4-byte Reload
+ str r2, [r0, #116]
+ ldr r2, [sp, #144] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #204] @ 4-byte Reload
+ str r2, [r0, #120]
+ ldr r2, [sp, #148] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #208] @ 4-byte Reload
+ cmp r7, #0
+ str r2, [r0, #124]
+ ldr r2, [sp, #152] @ 4-byte Reload
+ moveq r2, r1
+ ldr r1, [sp, #212] @ 4-byte Reload
+ str r2, [r0, #128]
+ moveq r3, r1
+ str r3, [r0, #132]
+ add sp, sp, #216
+ pop {r4, r5, r6, r7, r8, r9, r10, r11, lr}
+ mov pc, lr
+.Lfunc_end269:
+ .size mcl_fpDbl_sub17L, .Lfunc_end269-mcl_fpDbl_sub17L
+ .cantunwind
+ .fnend
+
+
+ .section ".note.GNU-stack","",%progbits
+ .eabi_attribute 30, 2 @ Tag_ABI_optimization_goals
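Note on the pattern above: mcl_fpDbl_add17L and mcl_fpDbl_sub17L (like the smaller NL variants earlier in this file) store the low 17 limbs of the double-width result directly, and for the high 17 limbs compute both the raw sum/difference and a modulus-adjusted copy, selecting between them with moveq/movne on the final carry/borrow. A minimal C sketch of the subtraction variant, assuming 32-bit little-endian limbs; the helper name and the N macro are illustrative, not part of mcl's API:

#include <stdint.h>

#define N 17                       /* limbs per half, matching the 17L routines */

/* z[0..2N-1] = x - y; if the full-width subtraction borrows, the modulus p is
   added back into the high half, mirroring the subs/sbcs/adds/moveq sequence
   above.  Plain sketch only: it branches, the assembly selects branch-free. */
static void fpDbl_sub_sketch(uint32_t *z, const uint32_t *x,
                             const uint32_t *y, const uint32_t *p)
{
    uint64_t borrow = 0;
    for (int i = 0; i < 2 * N; i++) {
        uint64_t t = (uint64_t)x[i] - y[i] - borrow;
        z[i] = (uint32_t)t;
        borrow = (t >> 32) & 1;            /* 1 iff this limb underflowed */
    }
    if (borrow) {
        uint64_t carry = 0;
        for (int i = 0; i < N; i++) {
            uint64_t t = (uint64_t)z[N + i] + p[i] + carry;
            z[N + i] = (uint32_t)t;
            carry = t >> 32;
        }
    }
}

The addition variant is symmetric: it trial-subtracts p from the high half and keeps the subtracted value unless that trial subtraction underflows.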
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/low_arm.s b/vendor/github.com/tangerine-network/mcl/src/asm/low_arm.s
new file mode 100644
index 000000000..1ed2a1233
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/low_arm.s
@@ -0,0 +1,154 @@
+ .arch armv7-a
+
+ .align 2
+ .global mcl_fp_addPre64
+mcl_fp_addPre64:
+ ldm r1, {r3, r12}
+ ldm r2, {r1, r2}
+ adds r1, r1, r3
+ adc r2, r2, r12
+ stm r0, {r1, r2}
+ bx lr
+
+
+ .align 2
+ .global mcl_fp_addPre96
+mcl_fp_addPre96:
+ push {r4, lr}
+ ldm r1, {r1, r3, r12}
+ ldm r2, {r2, r4, lr}
+ adds r1, r1, r2
+ adcs r3, r3, r4
+ adc r12, r12, lr
+ stm r0, {r1, r3, r12}
+ pop {r4, lr}
+ bx lr
+
+# slower
+ .align 2
+ .global mcl_fp_addPre96_2
+mcl_fp_addPre96_2:
+ ldr r3, [r1], #4
+ ldr r12, [r2], #4
+ adds r3, r3, r12
+ str r3, [r0], #4
+
+ ldm r1, {r1, r3}
+ ldm r2, {r2, r12}
+ adcs r1, r1, r2
+ adcs r3, r3, r12
+ stm r0, {r1, r3}
+ bx lr
+
+ .globl mcl_fp_addPre128
+ .align 2
+mcl_fp_addPre128:
+ push {r4, lr}
+ ldm r1!, {r3, r4}
+ ldm r2!, {r12, lr}
+ adds r3, r3, r12
+ adcs r4, r4, lr
+ stm r0!, {r3, r4}
+ ldm r1, {r3, r4}
+ ldm r2, {r12, lr}
+ adcs r3, r3, r12
+ adcs r4, r4, lr
+ stm r0, {r3, r4}
+ pop {r4, lr}
+ bx lr
+
+ # almost same
+ .globl mcl_fp_addPre128_2
+ .align 2
+mcl_fp_addPre128_2:
+ push {r4, r5, r6, lr}
+ ldm r1, {r1, r3, r4, r5}
+ ldm r2, {r2, r6, r12, lr}
+ adds r1, r1, r2
+ adcs r3, r3, r6
+ adcs r4, r4, r12
+ adcs r5, r5, lr
+ stm r0, {r1, r3, r4, r5}
+ pop {r4, r5, r6, lr}
+ bx lr
+
+ .globl mcl_fp_addPre160
+ .align 2
+mcl_fp_addPre160:
+ push {r4, lr}
+ ldm r1!, {r3, r4}
+ ldm r2!, {r12, lr}
+ adds r3, r3, r12
+ adcs r4, r4, lr
+ stm r0!, {r3, r4}
+ ldm r1, {r1, r3, r4}
+ ldm r2, {r2, r12, lr}
+ adcs r1, r1, r2
+ adcs r3, r3, r12
+ adcs r4, r4, lr
+ stm r0, {r1, r3, r4}
+ pop {r4, lr}
+ bx lr
+
+ .globl mcl_fp_addPre192
+ .align 2
+mcl_fp_addPre192:
+ push {r4, r5, r6, lr}
+ ldm r1!, {r3, r4, r5}
+ ldm r2!, {r6, r12, lr}
+ adds r3, r3, r6
+ adcs r4, r4, r12
+ adcs r5, r5, lr
+ stm r0!, {r3, r4, r5}
+
+ ldm r1, {r3, r4, r5}
+ ldm r2, {r6, r12, lr}
+ adcs r3, r3, r6
+ adcs r4, r4, r12
+ adcs r5, r5, lr
+ stm r0, {r3, r4, r5}
+ pop {r4, r5, r6, lr}
+ bx lr
+
+ .globl mcl_fp_addPre224
+ .align 2
+mcl_fp_addPre224:
+ push {r4, r5, r6, lr}
+ ldm r1!, {r3, r4, r5}
+ ldm r2!, {r6, r12, lr}
+ adds r3, r3, r6
+ adcs r4, r4, r12
+ adcs r5, r5, lr
+ stm r0!, {r3, r4, r5}
+
+ ldm r1, {r1, r3, r4, r5}
+ ldm r2, {r2, r6, r12, lr}
+ adcs r1, r1, r2
+ adcs r3, r3, r6
+ adcs r4, r4, r12
+ adcs r5, r5, lr
+ stm r0, {r1, r3, r4, r5}
+ pop {r4, r5, r6, lr}
+ bx lr
+
+ .globl mcl_fp_addPre256
+ .align 2
+mcl_fp_addPre256:
+ push {r4, r5, r6, r7, r8, lr}
+ ldm r1!, {r3, r4, r5, r6}
+ ldm r2!, {r7, r8, r12, lr}
+ adds r3, r3, r7
+ adcs r4, r4, r8
+ adcs r5, r5, r12
+ adcs r6, r6, lr
+ stm r0!, {r3, r4, r5, r6}
+
+ ldm r1, {r3, r4, r5, r6}
+ ldm r2, {r7, r8, r12, lr}
+ adcs r3, r3, r7
+ adcs r4, r4, r8
+ adcs r5, r5, r12
+ adcs r6, r6, lr
+ stm r0, {r3, r4, r5, r6}
+ pop {r4, r5, r6, r7, r8, lr}
+ bx lr
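All of the mcl_fp_addPreNNN helpers in low_arm.s above compute the same thing at different fixed widths: a raw multi-limb addition with the carry propagated through adds/adcs, differing only in how the ldm/stm accesses are unrolled (the "_2" variants marked "slower" and "almost same" are alternative load/store schedules of the same computation). A generic C equivalent, assuming 32-bit limbs; the loop form and its name are illustrative, the real helpers are fully unrolled:

#include <stdint.h>

/* Equivalent of mcl_fp_addPre64 ... mcl_fp_addPre256 above: z = x + y over
   n 32-bit limbs (n = bit width / 32), the carry rippling from limb to limb
   exactly as the adds/adcs chain does. */
static uint32_t addPre_sketch(uint32_t *z, const uint32_t *x,
                              const uint32_t *y, int n)
{
    uint64_t carry = 0;
    for (int i = 0; i < n; i++) {
        uint64_t t = (uint64_t)x[i] + y[i] + carry;
        z[i] = (uint32_t)t;
        carry = t >> 32;                   /* becomes the adcs input for limb i+1 */
    }
    return (uint32_t)carry;                /* the ARM versions simply drop this */
}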
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/low_x86-64.asm b/vendor/github.com/tangerine-network/mcl/src/asm/low_x86-64.asm
new file mode 100644
index 000000000..b09b9dcd3
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/low_x86-64.asm
@@ -0,0 +1,153 @@
+
+; Linux rdi rsi rdx rcx
+; Win rcx rdx r8 r9
+
+%ifdef _WIN64
+ %define p1org rcx
+ %define p2org rdx
+ %define p3org r8
+ %define p4org r9
+%else
+ %define p1org rdi
+ %define p2org rsi
+ %define p3org rdx
+ %define p4org rcx
+%endif
+
+%imacro proc 1
+global %1
+%1:
+%endmacro
+
+segment .text
+
+%imacro addPre 1
+ mov rax, [p2org]
+ add rax, [p3org]
+ mov [p1org], rax
+%assign i 1
+%rep %1
+ mov rax, [p2org + i * 8]
+ adc rax, [p3org + i * 8]
+ mov [p1org + i * 8], rax
+%assign i (i+1)
+%endrep
+ setc al
+ movzx eax, al
+ ret
+%endmacro
+
+%imacro subNC 1
+ mov rax, [p2org]
+ sub rax, [p3org]
+ mov [p1org], rax
+%assign i 1
+%rep %1
+ mov rax, [p2org + i * 8]
+ sbb rax, [p3org + i * 8]
+ mov [p1org + i * 8], rax
+%assign i (i+1)
+%endrep
+ setc al
+ movzx eax, al
+ ret
+%endmacro
+
+proc mcl_fp_addPre64
+ addPre 0
+proc mcl_fp_addPre128
+ addPre 1
+proc mcl_fp_addPre192
+ addPre 2
+proc mcl_fp_addPre256
+ addPre 3
+proc mcl_fp_addPre320
+ addPre 4
+proc mcl_fp_addPre384
+ addPre 5
+proc mcl_fp_addPre448
+ addPre 6
+proc mcl_fp_addPre512
+ addPre 7
+proc mcl_fp_addPre576
+ addPre 8
+proc mcl_fp_addPre640
+ addPre 9
+proc mcl_fp_addPre704
+ addPre 10
+proc mcl_fp_addPre768
+ addPre 11
+proc mcl_fp_addPre832
+ addPre 12
+proc mcl_fp_addPre896
+ addPre 13
+proc mcl_fp_addPre960
+ addPre 14
+proc mcl_fp_addPre1024
+ addPre 15
+proc mcl_fp_addPre1088
+ addPre 16
+proc mcl_fp_addPre1152
+ addPre 17
+proc mcl_fp_addPre1216
+ addPre 18
+proc mcl_fp_addPre1280
+ addPre 19
+proc mcl_fp_addPre1344
+ addPre 20
+proc mcl_fp_addPre1408
+ addPre 21
+proc mcl_fp_addPre1472
+ addPre 22
+proc mcl_fp_addPre1536
+ addPre 23
+
+proc mcl_fp_subNC64
+ subNC 0
+proc mcl_fp_subNC128
+ subNC 1
+proc mcl_fp_subNC192
+ subNC 2
+proc mcl_fp_subNC256
+ subNC 3
+proc mcl_fp_subNC320
+ subNC 4
+proc mcl_fp_subNC384
+ subNC 5
+proc mcl_fp_subNC448
+ subNC 6
+proc mcl_fp_subNC512
+ subNC 7
+proc mcl_fp_subNC576
+ subNC 8
+proc mcl_fp_subNC640
+ subNC 9
+proc mcl_fp_subNC704
+ subNC 10
+proc mcl_fp_subNC768
+ subNC 11
+proc mcl_fp_subNC832
+ subNC 12
+proc mcl_fp_subNC896
+ subNC 13
+proc mcl_fp_subNC960
+ subNC 14
+proc mcl_fp_subNC1024
+ subNC 15
+proc mcl_fp_subNC1088
+ subNC 16
+proc mcl_fp_subNC1152
+ subNC 17
+proc mcl_fp_subNC1216
+ subNC 18
+proc mcl_fp_subNC1280
+ subNC 19
+proc mcl_fp_subNC1344
+ subNC 20
+proc mcl_fp_subNC1408
+ subNC 21
+proc mcl_fp_subNC1472
+ subNC 22
+proc mcl_fp_subNC1536
+ subNC 23
+
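In low_x86-64.asm above, addPre and subNC are NASM macros whose single argument is the number of limbs beyond the first, so `addPre 3` expands into the 4-limb (256-bit) add/adc chain behind mcl_fp_addPre256, and `subNC 23` into the 24-limb (1536-bit) sub/sbb chain behind mcl_fp_subNC1536, with the final carry or borrow returned in eax via setc/movzx. A C sketch of the subtraction side, assuming 64-bit limbs and the GCC/Clang unsigned __int128 extension; the helper name is made up for illustration:

#include <stdint.h>

typedef unsigned __int128 u128;            /* GCC/Clang extension */

/* What `subNC k` computes for n = k + 1 limbs: z = x - y with the borrow
   rippled by sub/sbb and no modular correction applied, returning the final
   borrow as the asm does through setc al / movzx eax, al. */
static uint64_t subNC_sketch(uint64_t *z, const uint64_t *x,
                             const uint64_t *y, int n)
{
    uint64_t borrow = 0;
    for (int i = 0; i < n; i++) {
        u128 t = (u128)x[i] - y[i] - borrow;
        z[i] = (uint64_t)t;
        borrow = (uint64_t)(t >> 64) & 1;  /* 1 iff this limb underflowed */
    }
    return borrow;
}

addPre is the mirror image with add/adc, returning the final carry instead of the borrow.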
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/low_x86.asm b/vendor/github.com/tangerine-network/mcl/src/asm/low_x86.asm
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/low_x86.asm
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/x86-64.bmi2.s b/vendor/github.com/tangerine-network/mcl/src/asm/x86-64.bmi2.s
new file mode 100644
index 000000000..e12174ac6
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/x86-64.bmi2.s
@@ -0,0 +1,14155 @@
+ .text
+ .file "<stdin>"
+ .globl makeNIST_P192Lbmi2
+ .align 16, 0x90
+ .type makeNIST_P192Lbmi2,@function
+makeNIST_P192Lbmi2: # @makeNIST_P192Lbmi2
+# BB#0:
+ movq $-1, %rax
+ movq $-2, %rdx
+ movq $-1, %rcx
+ retq
+.Lfunc_end0:
+ .size makeNIST_P192Lbmi2, .Lfunc_end0-makeNIST_P192Lbmi2
+
+ .globl mcl_fpDbl_mod_NIST_P192Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mod_NIST_P192Lbmi2,@function
+mcl_fpDbl_mod_NIST_P192Lbmi2: # @mcl_fpDbl_mod_NIST_P192Lbmi2
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 16(%rsi), %r10
+ movq 24(%rsi), %r8
+ movq 40(%rsi), %r9
+ movq 8(%rsi), %rax
+ addq %r9, %rax
+ adcq $0, %r10
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ movq 32(%rsi), %r11
+ movq (%rsi), %r14
+ addq %r8, %r14
+ adcq %r11, %rax
+ adcq %r9, %r10
+ adcq $0, %rcx
+ addq %r9, %r14
+ adcq %r8, %rax
+ adcq %r11, %r10
+ adcq $0, %rcx
+ addq %rcx, %r14
+ adcq %rax, %rcx
+ adcq $0, %r10
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r14, %rsi
+ addq $1, %rsi
+ movq %rcx, %rdx
+ adcq $1, %rdx
+ movq %r10, %rbx
+ adcq $0, %rbx
+ adcq $-1, %rax
+ andl $1, %eax
+ cmovneq %r14, %rsi
+ movq %rsi, (%rdi)
+ testb %al, %al
+ cmovneq %rcx, %rdx
+ movq %rdx, 8(%rdi)
+ cmovneq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end1:
+ .size mcl_fpDbl_mod_NIST_P192Lbmi2, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192Lbmi2
+
+ .globl mcl_fp_sqr_NIST_P192Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sqr_NIST_P192Lbmi2,@function
+mcl_fp_sqr_NIST_P192Lbmi2: # @mcl_fp_sqr_NIST_P192Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r8
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ movq %r8, %rdx
+ mulxq %rsi, %r14, %rbx
+ movq %rbx, -16(%rsp) # 8-byte Spill
+ movq %rsi, %rdx
+ mulxq %rsi, %r13, %r15
+ movq %rsi, %rdx
+ mulxq %rcx, %r12, %rsi
+ addq %rsi, %r13
+ adcq %r14, %r15
+ adcq $0, %rbx
+ movq %rcx, %rdx
+ mulxq %rcx, %r9, %rax
+ addq %r12, %rax
+ movq %r8, %rdx
+ mulxq %rcx, %rbp, %r11
+ adcq %rbp, %rsi
+ movq %r11, %r10
+ adcq $0, %r10
+ addq %r12, %rax
+ adcq %r13, %rsi
+ adcq %r15, %r10
+ adcq $0, %rbx
+ movq %r8, %rdx
+ mulxq %r8, %rcx, %rdi
+ addq %r14, %r11
+ adcq -16(%rsp), %rcx # 8-byte Folded Reload
+ adcq $0, %rdi
+ addq %rbp, %rsi
+ adcq %r10, %r11
+ adcq %rbx, %rcx
+ adcq $0, %rdi
+ addq %rdi, %rax
+ adcq $0, %rsi
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ addq %r11, %r9
+ adcq %rcx, %rax
+ adcq %rdi, %rsi
+ adcq $0, %rdx
+ addq %rdi, %r9
+ adcq %r11, %rax
+ adcq %rcx, %rsi
+ adcq $0, %rdx
+ addq %rdx, %r9
+ adcq %rax, %rdx
+ adcq $0, %rsi
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r9, %rcx
+ addq $1, %rcx
+ movq %rdx, %rdi
+ adcq $1, %rdi
+ movq %rsi, %rbp
+ adcq $0, %rbp
+ adcq $-1, %rax
+ andl $1, %eax
+ cmovneq %r9, %rcx
+ movq -8(%rsp), %rbx # 8-byte Reload
+ movq %rcx, (%rbx)
+ testb %al, %al
+ cmovneq %rdx, %rdi
+ movq %rdi, 8(%rbx)
+ cmovneq %rsi, %rbp
+ movq %rbp, 16(%rbx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end2:
+ .size mcl_fp_sqr_NIST_P192Lbmi2, .Lfunc_end2-mcl_fp_sqr_NIST_P192Lbmi2
+
+ .globl mcl_fp_mulNIST_P192Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulNIST_P192Lbmi2,@function
+mcl_fp_mulNIST_P192Lbmi2: # @mcl_fp_mulNIST_P192Lbmi2
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ subq $56, %rsp
+ movq %rdi, %r14
+ leaq 8(%rsp), %rdi
+ callq mcl_fpDbl_mulPre3Lbmi2@PLT
+ movq 24(%rsp), %r9
+ movq 32(%rsp), %r8
+ movq 48(%rsp), %rdi
+ movq 16(%rsp), %rbx
+ addq %rdi, %rbx
+ adcq $0, %r9
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ movq 40(%rsp), %rsi
+ movq 8(%rsp), %rdx
+ addq %r8, %rdx
+ adcq %rsi, %rbx
+ adcq %rdi, %r9
+ adcq $0, %rcx
+ addq %rdi, %rdx
+ adcq %r8, %rbx
+ adcq %rsi, %r9
+ adcq $0, %rcx
+ addq %rcx, %rdx
+ adcq %rbx, %rcx
+ adcq $0, %r9
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rdx, %rdi
+ addq $1, %rdi
+ movq %rcx, %rbx
+ adcq $1, %rbx
+ movq %r9, %rax
+ adcq $0, %rax
+ adcq $-1, %rsi
+ andl $1, %esi
+ cmovneq %rdx, %rdi
+ movq %rdi, (%r14)
+ testb %sil, %sil
+ cmovneq %rcx, %rbx
+ movq %rbx, 8(%r14)
+ cmovneq %r9, %rax
+ movq %rax, 16(%r14)
+ addq $56, %rsp
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end3:
+ .size mcl_fp_mulNIST_P192Lbmi2, .Lfunc_end3-mcl_fp_mulNIST_P192Lbmi2
+
+ .globl mcl_fpDbl_mod_NIST_P521Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mod_NIST_P521Lbmi2,@function
+mcl_fpDbl_mod_NIST_P521Lbmi2: # @mcl_fpDbl_mod_NIST_P521Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 120(%rsi), %r9
+ movq 128(%rsi), %r14
+ movq %r14, %r8
+ shldq $55, %r9, %r8
+ movq 112(%rsi), %r10
+ shldq $55, %r10, %r9
+ movq 104(%rsi), %r11
+ shldq $55, %r11, %r10
+ movq 96(%rsi), %r15
+ shldq $55, %r15, %r11
+ movq 88(%rsi), %r12
+ shldq $55, %r12, %r15
+ movq 80(%rsi), %rcx
+ shldq $55, %rcx, %r12
+ movq 64(%rsi), %rbx
+ movq 72(%rsi), %rax
+ shldq $55, %rax, %rcx
+ shrq $9, %r14
+ shldq $55, %rbx, %rax
+ andl $511, %ebx # imm = 0x1FF
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r12
+ adcq 24(%rsi), %r15
+ adcq 32(%rsi), %r11
+ adcq 40(%rsi), %r10
+ adcq 48(%rsi), %r9
+ adcq 56(%rsi), %r8
+ adcq %r14, %rbx
+ movq %rbx, %rsi
+ shrq $9, %rsi
+ andl $1, %esi
+ addq %rax, %rsi
+ adcq $0, %rcx
+ adcq $0, %r12
+ adcq $0, %r15
+ adcq $0, %r11
+ adcq $0, %r10
+ adcq $0, %r9
+ adcq $0, %r8
+ adcq $0, %rbx
+ movq %rsi, %rax
+ andq %r12, %rax
+ andq %r15, %rax
+ andq %r11, %rax
+ andq %r10, %rax
+ andq %r9, %rax
+ andq %r8, %rax
+ movq %rbx, %rdx
+ orq $-512, %rdx # imm = 0xFFFFFFFFFFFFFE00
+ andq %rax, %rdx
+ andq %rcx, %rdx
+ cmpq $-1, %rdx
+ je .LBB4_1
+# BB#3: # %nonzero
+ movq %rsi, (%rdi)
+ movq %rcx, 8(%rdi)
+ movq %r12, 16(%rdi)
+ movq %r15, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %r8, 56(%rdi)
+ andl $511, %ebx # imm = 0x1FF
+ movq %rbx, 64(%rdi)
+ jmp .LBB4_2
+.LBB4_1: # %zero
+ movq $0, 64(%rdi)
+ movq $0, 56(%rdi)
+ movq $0, 48(%rdi)
+ movq $0, 40(%rdi)
+ movq $0, 32(%rdi)
+ movq $0, 24(%rdi)
+ movq $0, 16(%rdi)
+ movq $0, 8(%rdi)
+ movq $0, (%rdi)
+.LBB4_2: # %zero
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end4:
+ .size mcl_fpDbl_mod_NIST_P521Lbmi2, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521Lbmi2
+
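mcl_fpDbl_mod_NIST_P521Lbmi2 above relies on p = 2^521 - 1: since 2^521 is congruent to 1 mod p, the shldq-by-55 sequence builds x >> 521 from limbs 8..16, adds it to x mod 2^521, folds the single possible overflow bit back in, and finally maps the all-ones representative p to zero (the cmpq $-1 / "zero" branch). A rough C sketch of the same folding on 64-bit limbs, assuming a double-width input below p^2; the names and the non-constant-time structure are illustrative only:

#include <stdint.h>

typedef unsigned __int128 u128;            /* GCC/Clang extension */

/* x: 17 limbs (double-width input), z: 9 limbs with z[8] < 2^9 on return. */
static void mod_p521_sketch(uint64_t z[9], const uint64_t x[17])
{
    uint64_t hi[9];
    /* hi = x >> 521, i.e. limbs 8..16 shifted right by 9 bits
       (what the shldq $55 / shrq $9 sequence computes). */
    for (int i = 0; i < 9; i++)
        hi[i] = (x[8 + i] >> 9) | (i < 8 ? x[9 + i] << 55 : 0);
    /* z = (x mod 2^521) + hi, using 2^521 = 1 (mod p). */
    u128 acc = 0;
    for (int i = 0; i < 9; i++) {
        acc += (u128)(i < 8 ? x[i] : (x[8] & 0x1FF)) + hi[i];
        z[i] = (uint64_t)acc;
        acc >>= 64;
    }
    /* Fold the one bit that may now sit above position 520. */
    uint64_t c = z[8] >> 9;
    z[8] &= 0x1FF;
    for (int i = 0; c && i < 9; i++) {
        z[i] += c;
        c = (z[i] == 0);                   /* carried out iff the add wrapped */
    }
    /* The representative 2^521 - 1 (all ones) stands for zero. */
    int is_p = (z[8] == 0x1FF);
    for (int i = 0; i < 8; i++)
        is_p &= (z[i] == UINT64_MAX);
    if (is_p)
        for (int i = 0; i < 9; i++) z[i] = 0;
}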
+ .globl mcl_fp_mulUnitPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre1Lbmi2,@function
+mcl_fp_mulUnitPre1Lbmi2: # @mcl_fp_mulUnitPre1Lbmi2
+# BB#0:
+ mulxq (%rsi), %rcx, %rax
+ movq %rcx, (%rdi)
+ movq %rax, 8(%rdi)
+ retq
+.Lfunc_end5:
+ .size mcl_fp_mulUnitPre1Lbmi2, .Lfunc_end5-mcl_fp_mulUnitPre1Lbmi2
+
+ .globl mcl_fpDbl_mulPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre1Lbmi2,@function
+mcl_fpDbl_mulPre1Lbmi2: # @mcl_fpDbl_mulPre1Lbmi2
+# BB#0:
+ movq (%rdx), %rdx
+ mulxq (%rsi), %rcx, %rax
+ movq %rcx, (%rdi)
+ movq %rax, 8(%rdi)
+ retq
+.Lfunc_end6:
+ .size mcl_fpDbl_mulPre1Lbmi2, .Lfunc_end6-mcl_fpDbl_mulPre1Lbmi2
+
+ .globl mcl_fpDbl_sqrPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre1Lbmi2,@function
+mcl_fpDbl_sqrPre1Lbmi2: # @mcl_fpDbl_sqrPre1Lbmi2
+# BB#0:
+ movq (%rsi), %rdx
+ mulxq %rdx, %rcx, %rax
+ movq %rcx, (%rdi)
+ movq %rax, 8(%rdi)
+ retq
+.Lfunc_end7:
+ .size mcl_fpDbl_sqrPre1Lbmi2, .Lfunc_end7-mcl_fpDbl_sqrPre1Lbmi2
+
+ .globl mcl_fp_mont1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont1Lbmi2,@function
+mcl_fp_mont1Lbmi2: # @mcl_fp_mont1Lbmi2
+# BB#0:
+ movq %rdx, %rax
+ movq (%rsi), %rdx
+ mulxq (%rax), %rsi, %r8
+ movq -8(%rcx), %rdx
+ imulq %rsi, %rdx
+ movq (%rcx), %rcx
+ mulxq %rcx, %rdx, %rax
+ addq %rsi, %rdx
+ adcq %r8, %rax
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ movq %rax, %rsi
+ subq %rcx, %rsi
+ sbbq $0, %rdx
+ testb $1, %dl
+ cmovneq %rax, %rsi
+ movq %rsi, (%rdi)
+ retq
+.Lfunc_end8:
+ .size mcl_fp_mont1Lbmi2, .Lfunc_end8-mcl_fp_mont1Lbmi2
+
+ .globl mcl_fp_montNF1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF1Lbmi2,@function
+mcl_fp_montNF1Lbmi2: # @mcl_fp_montNF1Lbmi2
+# BB#0:
+ movq %rdx, %rax
+ movq (%rsi), %rdx
+ mulxq (%rax), %rsi, %r8
+ movq -8(%rcx), %rdx
+ imulq %rsi, %rdx
+ movq (%rcx), %rcx
+ mulxq %rcx, %rdx, %rax
+ addq %rsi, %rdx
+ adcq %r8, %rax
+ movq %rax, %rdx
+ subq %rcx, %rdx
+ cmovsq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+.Lfunc_end9:
+ .size mcl_fp_montNF1Lbmi2, .Lfunc_end9-mcl_fp_montNF1Lbmi2
+
+ .globl mcl_fp_montRed1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed1Lbmi2,@function
+mcl_fp_montRed1Lbmi2: # @mcl_fp_montRed1Lbmi2
+# BB#0:
+ movq (%rsi), %rcx
+ movq -8(%rdx), %rax
+ imulq %rcx, %rax
+ movq (%rdx), %r8
+ movq %rax, %rdx
+ mulxq %r8, %rax, %rdx
+ addq %rcx, %rax
+ adcq 8(%rsi), %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rcx
+ subq %r8, %rcx
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rcx
+ movq %rcx, (%rdi)
+ retq
+.Lfunc_end10:
+ .size mcl_fp_montRed1Lbmi2, .Lfunc_end10-mcl_fp_montRed1Lbmi2
+
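mcl_fp_mont1Lbmi2, mcl_fp_montNF1Lbmi2 and mcl_fp_montRed1Lbmi2 above are the single-limb cases of Montgomery multiplication and reduction: the imulq computes q = t * (-n^-1) mod 2^64 so that t + q*n is divisible by 2^64, and the tail is one conditional subtraction. A C sketch of the reduction step, assuming 64-bit limbs, a two-limb input t = x1*2^64 + x0 < n*2^64, and np = -n^-1 mod 2^64 (the word loaded from offset -8 of the modulus block); the helper name is illustrative:

#include <stdint.h>

typedef unsigned __int128 u128;            /* GCC/Clang extension */

/* Returns t * 2^-64 mod n for t = x1*2^64 + x0 < n*2^64, where
   np = -n^-1 mod 2^64.  Mirrors the imulq/mulxq/adc/cmov sequence above. */
static uint64_t montRed1_sketch(uint64_t x0, uint64_t x1,
                                uint64_t n, uint64_t np)
{
    uint64_t q  = x0 * np;                 /* q = x0 * (-n^-1) mod 2^64 */
    u128     qn = (u128)q * n;
    u128     lo = (u128)x0 + (uint64_t)qn; /* low limb becomes 0 mod 2^64 */
    u128     hi = (u128)x1 + (uint64_t)(qn >> 64) + (uint64_t)(lo >> 64);
    uint64_t r     = (uint64_t)hi;
    uint64_t carry = (uint64_t)(hi >> 64); /* at most 1 */
    if (carry || r >= n)                   /* the sbbq/cmovneq conditional subtract */
        r -= n;
    return r;
}

The NF variant differs only in the tail: it selects on the sign of r - n with cmovsq instead of tracking the carry bit explicitly.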
+ .globl mcl_fp_addPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre1Lbmi2,@function
+mcl_fp_addPre1Lbmi2: # @mcl_fp_addPre1Lbmi2
+# BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, (%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end11:
+ .size mcl_fp_addPre1Lbmi2, .Lfunc_end11-mcl_fp_addPre1Lbmi2
+
+ .globl mcl_fp_subPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre1Lbmi2,@function
+mcl_fp_subPre1Lbmi2: # @mcl_fp_subPre1Lbmi2
+# BB#0:
+ movq (%rsi), %rcx
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ movq %rcx, (%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end12:
+ .size mcl_fp_subPre1Lbmi2, .Lfunc_end12-mcl_fp_subPre1Lbmi2
+
+ .globl mcl_fp_shr1_1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_1Lbmi2,@function
+mcl_fp_shr1_1Lbmi2: # @mcl_fp_shr1_1Lbmi2
+# BB#0:
+ movq (%rsi), %rax
+ shrq %rax
+ movq %rax, (%rdi)
+ retq
+.Lfunc_end13:
+ .size mcl_fp_shr1_1Lbmi2, .Lfunc_end13-mcl_fp_shr1_1Lbmi2
+
+ .globl mcl_fp_add1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add1Lbmi2,@function
+mcl_fp_add1Lbmi2: # @mcl_fp_add1Lbmi2
+# BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, (%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %rax
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne .LBB14_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+.LBB14_2: # %carry
+ retq
+.Lfunc_end14:
+ .size mcl_fp_add1Lbmi2, .Lfunc_end14-mcl_fp_add1Lbmi2
+
+ .globl mcl_fp_addNF1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF1Lbmi2,@function
+mcl_fp_addNF1Lbmi2: # @mcl_fp_addNF1Lbmi2
+# BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, %rdx
+ subq (%rcx), %rdx
+ cmovsq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+.Lfunc_end15:
+ .size mcl_fp_addNF1Lbmi2, .Lfunc_end15-mcl_fp_addNF1Lbmi2
+
+ .globl mcl_fp_sub1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub1Lbmi2,@function
+mcl_fp_sub1Lbmi2: # @mcl_fp_sub1Lbmi2
+# BB#0:
+ movq (%rsi), %rax
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ movq %rax, (%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB16_2
+# BB#1: # %nocarry
+ retq
+.LBB16_2: # %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ retq
+.Lfunc_end16:
+ .size mcl_fp_sub1Lbmi2, .Lfunc_end16-mcl_fp_sub1Lbmi2
+
+ .globl mcl_fp_subNF1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF1Lbmi2,@function
+mcl_fp_subNF1Lbmi2: # @mcl_fp_subNF1Lbmi2
+# BB#0:
+ movq (%rsi), %rax
+ subq (%rdx), %rax
+ movq %rax, %rdx
+ sarq $63, %rdx
+ andq (%rcx), %rdx
+ addq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+.Lfunc_end17:
+ .size mcl_fp_subNF1Lbmi2, .Lfunc_end17-mcl_fp_subNF1Lbmi2
+
+ .globl mcl_fpDbl_add1Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add1Lbmi2,@function
+mcl_fpDbl_add1Lbmi2: # @mcl_fpDbl_add1Lbmi2
+# BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ movq %rax, (%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rsi
+ subq (%rcx), %rsi
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ retq
+.Lfunc_end18:
+ .size mcl_fpDbl_add1Lbmi2, .Lfunc_end18-mcl_fpDbl_add1Lbmi2
+
+ .globl mcl_fpDbl_sub1Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub1Lbmi2,@function
+mcl_fpDbl_sub1Lbmi2: # @mcl_fpDbl_sub1Lbmi2
+# BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %r8
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r8
+ movq %rax, (%rdi)
+ movl $0, %eax
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq (%rcx), %rsi
+ addq %r8, %rsi
+ movq %rsi, 8(%rdi)
+ retq
+.Lfunc_end19:
+ .size mcl_fpDbl_sub1Lbmi2, .Lfunc_end19-mcl_fpDbl_sub1Lbmi2
+
+ .globl mcl_fp_mulUnitPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre2Lbmi2,@function
+mcl_fp_mulUnitPre2Lbmi2: # @mcl_fp_mulUnitPre2Lbmi2
+# BB#0:
+ mulxq 8(%rsi), %rax, %rcx
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %rax, %rsi
+ movq %rsi, 8(%rdi)
+ adcq $0, %rcx
+ movq %rcx, 16(%rdi)
+ retq
+.Lfunc_end20:
+ .size mcl_fp_mulUnitPre2Lbmi2, .Lfunc_end20-mcl_fp_mulUnitPre2Lbmi2
+
+ .globl mcl_fpDbl_mulPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre2Lbmi2,@function
+mcl_fpDbl_mulPre2Lbmi2: # @mcl_fpDbl_mulPre2Lbmi2
+# BB#0:
+ movq %rdx, %r10
+ movq (%rsi), %r11
+ movq 8(%rsi), %r8
+ movq (%r10), %rsi
+ movq %r11, %rdx
+ mulxq %rsi, %rdx, %r9
+ movq %rdx, (%rdi)
+ movq %r8, %rdx
+ mulxq %rsi, %rsi, %rax
+ addq %r9, %rsi
+ adcq $0, %rax
+ movq 8(%r10), %rcx
+ movq %r11, %rdx
+ mulxq %rcx, %rdx, %r9
+ addq %rsi, %rdx
+ movq %rdx, 8(%rdi)
+ movq %r8, %rdx
+ mulxq %rcx, %rdx, %rcx
+ adcq %rax, %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r9, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %rcx, %rax
+ movq %rax, 24(%rdi)
+ retq
+.Lfunc_end21:
+ .size mcl_fpDbl_mulPre2Lbmi2, .Lfunc_end21-mcl_fpDbl_mulPre2Lbmi2
+
+ .globl mcl_fpDbl_sqrPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre2Lbmi2,@function
+mcl_fpDbl_sqrPre2Lbmi2: # @mcl_fpDbl_sqrPre2Lbmi2
+# BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %rcx
+ movq %rax, %rdx
+ mulxq %rax, %rdx, %rsi
+ movq %rdx, (%rdi)
+ movq %rcx, %rdx
+ mulxq %rax, %rdx, %r8
+ addq %rdx, %rsi
+ movq %r8, %rax
+ adcq $0, %rax
+ addq %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %rcx
+ adcq %rax, %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r8, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %rcx, %rax
+ movq %rax, 24(%rdi)
+ retq
+.Lfunc_end22:
+ .size mcl_fpDbl_sqrPre2Lbmi2, .Lfunc_end22-mcl_fpDbl_sqrPre2Lbmi2
+
+ .globl mcl_fp_mont2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont2Lbmi2,@function
+mcl_fp_mont2Lbmi2: # @mcl_fp_mont2Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %r11
+ movq %r9, %rdx
+ mulxq %rax, %r10, %r13
+ movq %r8, %rdx
+ mulxq %rax, %r14, %rsi
+ addq %r10, %rsi
+ adcq $0, %r13
+ movq -8(%rcx), %rbp
+ movq (%rcx), %r10
+ movq %r14, %rdx
+ imulq %rbp, %rdx
+ movq 8(%rcx), %r15
+ mulxq %r15, %r12, %rcx
+ mulxq %r10, %rdx, %rbx
+ addq %r12, %rbx
+ adcq $0, %rcx
+ addq %r14, %rdx
+ adcq %rsi, %rbx
+ adcq %r13, %rcx
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %r11, %rdx
+ mulxq %r9, %r9, %r14
+ movq %r11, %rdx
+ mulxq %r8, %r8, %rax
+ addq %r9, %rax
+ adcq $0, %r14
+ addq %rbx, %r8
+ adcq %rcx, %rax
+ adcq %rsi, %r14
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ imulq %r8, %rbp
+ movq %rbp, %rdx
+ mulxq %r15, %rcx, %rbx
+ movq %rbp, %rdx
+ mulxq %r10, %rdx, %rbp
+ addq %rcx, %rbp
+ adcq $0, %rbx
+ addq %r8, %rdx
+ adcq %rax, %rbp
+ adcq %r14, %rbx
+ adcq $0, %rsi
+ movq %rbp, %rax
+ subq %r10, %rax
+ movq %rbx, %rcx
+ sbbq %r15, %rcx
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rbx, %rcx
+ testb %sil, %sil
+ cmovneq %rbp, %rax
+ movq %rax, (%rdi)
+ movq %rcx, 8(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end23:
+ .size mcl_fp_mont2Lbmi2, .Lfunc_end23-mcl_fp_mont2Lbmi2
+
+ .globl mcl_fp_montNF2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF2Lbmi2,@function
+mcl_fp_montNF2Lbmi2: # @mcl_fp_montNF2Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %r11
+ movq %r9, %rdx
+ mulxq %rax, %r10, %rsi
+ movq %r8, %rdx
+ mulxq %rax, %r15, %r13
+ addq %r10, %r13
+ adcq $0, %rsi
+ movq -8(%rcx), %rbp
+ movq (%rcx), %r10
+ movq %r15, %rdx
+ imulq %rbp, %rdx
+ movq 8(%rcx), %r14
+ mulxq %r10, %rcx, %r12
+ addq %r15, %rcx
+ mulxq %r14, %rbx, %rcx
+ adcq %r13, %rbx
+ adcq $0, %rsi
+ addq %r12, %rbx
+ adcq %rcx, %rsi
+ movq %r11, %rdx
+ mulxq %r9, %r9, %rcx
+ movq %r11, %rdx
+ mulxq %r8, %r8, %rax
+ addq %r9, %rax
+ adcq $0, %rcx
+ addq %rbx, %r8
+ adcq %rsi, %rax
+ adcq $0, %rcx
+ imulq %r8, %rbp
+ movq %rbp, %rdx
+ mulxq %r14, %rbx, %rsi
+ movq %rbp, %rdx
+ mulxq %r10, %rbp, %rdx
+ addq %r8, %rbp
+ adcq %rax, %rbx
+ adcq $0, %rcx
+ addq %rdx, %rbx
+ adcq %rsi, %rcx
+ movq %rbx, %rax
+ subq %r10, %rax
+ movq %rcx, %rdx
+ sbbq %r14, %rdx
+ cmovsq %rbx, %rax
+ movq %rax, (%rdi)
+ cmovsq %rcx, %rdx
+ movq %rdx, 8(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end24:
+ .size mcl_fp_montNF2Lbmi2, .Lfunc_end24-mcl_fp_montNF2Lbmi2
+
+ .globl mcl_fp_montRed2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed2Lbmi2,@function
+mcl_fp_montRed2Lbmi2: # @mcl_fp_montRed2Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq -8(%rdx), %r15
+ movq (%rdx), %r8
+ movq (%rsi), %r10
+ movq %r10, %rcx
+ imulq %r15, %rcx
+ movq 8(%rdx), %r9
+ movq %rcx, %rdx
+ mulxq %r9, %r11, %r14
+ movq %rcx, %rdx
+ mulxq %r8, %rcx, %rax
+ addq %r11, %rax
+ adcq $0, %r14
+ movq 24(%rsi), %r11
+ addq %r10, %rcx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r14
+ adcq $0, %r11
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ imulq %rax, %r15
+ movq %r15, %rdx
+ mulxq %r9, %r10, %rbx
+ movq %r15, %rdx
+ mulxq %r8, %rsi, %rdx
+ addq %r10, %rdx
+ adcq $0, %rbx
+ addq %rax, %rsi
+ adcq %r14, %rdx
+ adcq %r11, %rbx
+ adcq $0, %rcx
+ movq %rdx, %rax
+ subq %r8, %rax
+ movq %rbx, %rsi
+ sbbq %r9, %rsi
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rbx, %rsi
+ testb %cl, %cl
+ cmovneq %rdx, %rax
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end25:
+ .size mcl_fp_montRed2Lbmi2, .Lfunc_end25-mcl_fp_montRed2Lbmi2
+
+ .globl mcl_fp_addPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre2Lbmi2,@function
+mcl_fp_addPre2Lbmi2: # @mcl_fp_addPre2Lbmi2
+# BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rcx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rcx
+ movq %rax, (%rdi)
+ movq %rcx, 8(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end26:
+ .size mcl_fp_addPre2Lbmi2, .Lfunc_end26-mcl_fp_addPre2Lbmi2
+
+ .globl mcl_fp_subPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre2Lbmi2,@function
+mcl_fp_subPre2Lbmi2: # @mcl_fp_subPre2Lbmi2
+# BB#0:
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end27:
+ .size mcl_fp_subPre2Lbmi2, .Lfunc_end27-mcl_fp_subPre2Lbmi2
+
+ .globl mcl_fp_shr1_2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_2Lbmi2,@function
+mcl_fp_shr1_2Lbmi2: # @mcl_fp_shr1_2Lbmi2
+# BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %rcx
+ shrdq $1, %rcx, %rax
+ movq %rax, (%rdi)
+ shrq %rcx
+ movq %rcx, 8(%rdi)
+ retq
+.Lfunc_end28:
+ .size mcl_fp_shr1_2Lbmi2, .Lfunc_end28-mcl_fp_shr1_2Lbmi2
+
+ .globl mcl_fp_add2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add2Lbmi2,@function
+mcl_fp_add2Lbmi2: # @mcl_fp_add2Lbmi2
+# BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB29_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+.LBB29_2: # %carry
+ retq
+.Lfunc_end29:
+ .size mcl_fp_add2Lbmi2, .Lfunc_end29-mcl_fp_add2Lbmi2
+
+ .globl mcl_fp_addNF2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF2Lbmi2,@function
+mcl_fp_addNF2Lbmi2: # @mcl_fp_addNF2Lbmi2
+# BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %r8
+ addq (%rsi), %rax
+ adcq 8(%rsi), %r8
+ movq %rax, %rsi
+ subq (%rcx), %rsi
+ movq %r8, %rdx
+ sbbq 8(%rcx), %rdx
+ testq %rdx, %rdx
+ cmovsq %rax, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r8, %rdx
+ movq %rdx, 8(%rdi)
+ retq
+.Lfunc_end30:
+ .size mcl_fp_addNF2Lbmi2, .Lfunc_end30-mcl_fp_addNF2Lbmi2
+
+ .globl mcl_fp_sub2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub2Lbmi2,@function
+mcl_fp_sub2Lbmi2: # @mcl_fp_sub2Lbmi2
+# BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %r8
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r8
+ movq %rax, (%rdi)
+ movq %r8, 8(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB31_2
+# BB#1: # %nocarry
+ retq
+.LBB31_2: # %carry
+ movq 8(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r8, %rdx
+ movq %rdx, 8(%rdi)
+ retq
+.Lfunc_end31:
+ .size mcl_fp_sub2Lbmi2, .Lfunc_end31-mcl_fp_sub2Lbmi2
+
+ .globl mcl_fp_subNF2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF2Lbmi2,@function
+mcl_fp_subNF2Lbmi2: # @mcl_fp_subNF2Lbmi2
+# BB#0:
+ movq (%rsi), %r8
+ movq 8(%rsi), %rsi
+ subq (%rdx), %r8
+ sbbq 8(%rdx), %rsi
+ movq %rsi, %rdx
+ sarq $63, %rdx
+ movq 8(%rcx), %rax
+ andq %rdx, %rax
+ andq (%rcx), %rdx
+ addq %r8, %rdx
+ movq %rdx, (%rdi)
+ adcq %rsi, %rax
+ movq %rax, 8(%rdi)
+ retq
+.Lfunc_end32:
+ .size mcl_fp_subNF2Lbmi2, .Lfunc_end32-mcl_fp_subNF2Lbmi2
+
+ .globl mcl_fpDbl_add2Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add2Lbmi2,@function
+mcl_fpDbl_add2Lbmi2: # @mcl_fpDbl_add2Lbmi2
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rdx), %r10
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r10
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ adcq %r8, %r9
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r10, %rdx
+ subq (%rcx), %rdx
+ movq %r9, %rsi
+ sbbq 8(%rcx), %rsi
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %r10, %rdx
+ movq %rdx, 16(%rdi)
+ testb %al, %al
+ cmovneq %r9, %rsi
+ movq %rsi, 24(%rdi)
+ retq
+.Lfunc_end33:
+ .size mcl_fpDbl_add2Lbmi2, .Lfunc_end33-mcl_fpDbl_add2Lbmi2
+
+ .globl mcl_fpDbl_sub2Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub2Lbmi2,@function
+mcl_fpDbl_sub2Lbmi2: # @mcl_fpDbl_sub2Lbmi2
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %r11
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %r11
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %r11, (%rdi)
+ movq %rsi, 8(%rdi)
+ sbbq %r8, %r9
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ cmovneq 8(%rcx), %rax
+ addq %r10, %rsi
+ movq %rsi, 16(%rdi)
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ retq
+.Lfunc_end34:
+ .size mcl_fpDbl_sub2Lbmi2, .Lfunc_end34-mcl_fpDbl_sub2Lbmi2
+
+ .globl mcl_fp_mulUnitPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre3Lbmi2,@function
+mcl_fp_mulUnitPre3Lbmi2: # @mcl_fp_mulUnitPre3Lbmi2
+# BB#0:
+ mulxq 16(%rsi), %r8, %rcx
+ mulxq 8(%rsi), %r9, %rax
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %r9, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r8, %rax
+ movq %rax, 16(%rdi)
+ adcq $0, %rcx
+ movq %rcx, 24(%rdi)
+ retq
+.Lfunc_end35:
+ .size mcl_fp_mulUnitPre3Lbmi2, .Lfunc_end35-mcl_fp_mulUnitPre3Lbmi2
+
+ .globl mcl_fpDbl_mulPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre3Lbmi2,@function
+mcl_fpDbl_mulPre3Lbmi2: # @mcl_fpDbl_mulPre3Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq %rdx, %r9
+ movq (%rsi), %r10
+ movq 8(%rsi), %r8
+ movq (%r9), %rax
+ movq %r10, %rdx
+ mulxq %rax, %rdx, %r14
+ movq 16(%rsi), %r11
+ movq %rdx, (%rdi)
+ movq %r11, %rdx
+ mulxq %rax, %rsi, %rbx
+ movq %r8, %rdx
+ mulxq %rax, %rax, %rcx
+ addq %r14, %rax
+ adcq %rsi, %rcx
+ adcq $0, %rbx
+ movq 8(%r9), %rsi
+ movq %r10, %rdx
+ mulxq %rsi, %rdx, %r14
+ addq %rax, %rdx
+ movq %rdx, 8(%rdi)
+ movq %r11, %rdx
+ mulxq %rsi, %rax, %r15
+ movq %r8, %rdx
+ mulxq %rsi, %rsi, %rdx
+ adcq %rcx, %rsi
+ adcq %rbx, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r14, %rsi
+ adcq %rdx, %rax
+ adcq %r15, %rcx
+ movq 16(%r9), %rbx
+ movq %r10, %rdx
+ mulxq %rbx, %rdx, %r9
+ addq %rsi, %rdx
+ movq %rdx, 16(%rdi)
+ movq %r11, %rdx
+ mulxq %rbx, %rsi, %r10
+ movq %r8, %rdx
+ mulxq %rbx, %rbx, %rdx
+ adcq %rax, %rbx
+ adcq %rcx, %rsi
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r9, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %rdx, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r10, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end36:
+ .size mcl_fpDbl_mulPre3Lbmi2, .Lfunc_end36-mcl_fpDbl_mulPre3Lbmi2
+
+ .globl mcl_fpDbl_sqrPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre3Lbmi2,@function
+mcl_fpDbl_sqrPre3Lbmi2: # @mcl_fpDbl_sqrPre3Lbmi2
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 16(%rsi), %r10
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %rax
+ movq %rdx, (%rdi)
+ movq %r10, %rdx
+ mulxq %rcx, %r11, %r8
+ movq %rsi, %rdx
+ mulxq %rcx, %rdx, %r14
+ addq %rdx, %rax
+ movq %r14, %rbx
+ adcq %r11, %rbx
+ movq %r8, %rcx
+ adcq $0, %rcx
+ addq %rdx, %rax
+ movq %rax, 8(%rdi)
+ movq %r10, %rdx
+ mulxq %rsi, %rax, %r9
+ movq %rsi, %rdx
+ mulxq %rsi, %rsi, %rdx
+ adcq %rbx, %rsi
+ adcq %rax, %rcx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq %r14, %rsi
+ adcq %rdx, %rcx
+ adcq %r9, %rbx
+ addq %r11, %rsi
+ movq %rsi, 16(%rdi)
+ movq %r10, %rdx
+ mulxq %r10, %rsi, %rdx
+ adcq %rax, %rcx
+ adcq %rbx, %rsi
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r8, %rcx
+ movq %rcx, 24(%rdi)
+ adcq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %rdx, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end37:
+ .size mcl_fpDbl_sqrPre3Lbmi2, .Lfunc_end37-mcl_fpDbl_sqrPre3Lbmi2
+
+ .globl mcl_fp_mont3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont3Lbmi2,@function
+mcl_fp_mont3Lbmi2: # @mcl_fp_mont3Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r15
+ movq %r15, -32(%rsp) # 8-byte Spill
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rdi
+ movq %rdi, -56(%rsp) # 8-byte Spill
+ movq (%r15), %rax
+ movq %rdi, %rdx
+ mulxq %rax, %r14, %r11
+ movq (%rsi), %r12
+ movq %r12, -48(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ mulxq %rax, %rbx, %r8
+ movq %r12, %rdx
+ mulxq %rax, %r9, %rdi
+ addq %rbx, %rdi
+ adcq %r14, %r8
+ adcq $0, %r11
+ movq -8(%rcx), %r13
+ movq (%rcx), %rbx
+ movq %rbx, -8(%rsp) # 8-byte Spill
+ movq %r9, %rdx
+ imulq %r13, %rdx
+ movq 8(%rcx), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ mulxq %rax, %rax, %r10
+ mulxq %rbx, %rsi, %rbx
+ addq %rax, %rbx
+ movq 16(%rcx), %rbp
+ mulxq %rbp, %rcx, %rax
+ movq %rbp, %r14
+ adcq %r10, %rcx
+ adcq $0, %rax
+ addq %r9, %rsi
+ adcq %rdi, %rbx
+ movq 8(%r15), %rdx
+ adcq %r8, %rcx
+ adcq %r11, %rax
+ sbbq %r9, %r9
+ andl $1, %r9d
+ movq -56(%rsp), %r15 # 8-byte Reload
+ mulxq %r15, %r11, %rdi
+ mulxq -16(%rsp), %r10, %rsi # 8-byte Folded Reload
+ mulxq %r12, %r8, %rbp
+ addq %r10, %rbp
+ adcq %r11, %rsi
+ adcq $0, %rdi
+ addq %rbx, %r8
+ adcq %rcx, %rbp
+ adcq %rax, %rsi
+ adcq %r9, %rdi
+ sbbq %r11, %r11
+ andl $1, %r11d
+ movq %r8, %rdx
+ imulq %r13, %rdx
+ mulxq %r14, %r9, %rcx
+ movq %r14, %r12
+ movq -40(%rsp), %r14 # 8-byte Reload
+ mulxq %r14, %r10, %rax
+ mulxq -8(%rsp), %rdx, %rbx # 8-byte Folded Reload
+ addq %r10, %rbx
+ adcq %r9, %rax
+ adcq $0, %rcx
+ addq %r8, %rdx
+ adcq %rbp, %rbx
+ adcq %rsi, %rax
+ adcq %rdi, %rcx
+ adcq $0, %r11
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq %r15, %r9, %rsi
+ mulxq -16(%rsp), %r10, %r15 # 8-byte Folded Reload
+ mulxq -48(%rsp), %r8, %rdi # 8-byte Folded Reload
+ addq %r10, %rdi
+ adcq %r9, %r15
+ adcq $0, %rsi
+ addq %rbx, %r8
+ adcq %rax, %rdi
+ adcq %rcx, %r15
+ adcq %r11, %rsi
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ imulq %r8, %r13
+ movq %r13, %rdx
+ mulxq %r12, %r9, %rbp
+ movq %r13, %rdx
+ mulxq %r14, %r10, %rax
+ movq %r13, %rdx
+ movq -8(%rsp), %rcx # 8-byte Reload
+ mulxq %rcx, %r11, %rdx
+ addq %r10, %rdx
+ adcq %r9, %rax
+ adcq $0, %rbp
+ addq %r8, %r11
+ adcq %rdi, %rdx
+ adcq %r15, %rax
+ adcq %rsi, %rbp
+ adcq $0, %rbx
+ movq %rdx, %rsi
+ subq %rcx, %rsi
+ movq %rax, %rdi
+ sbbq %r14, %rdi
+ movq %rbp, %rcx
+ sbbq %r12, %rcx
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %rbp, %rcx
+ testb %bl, %bl
+ cmovneq %rdx, %rsi
+ movq -24(%rsp), %rdx # 8-byte Reload
+ movq %rsi, (%rdx)
+ cmovneq %rax, %rdi
+ movq %rdi, 8(%rdx)
+ movq %rcx, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end38:
+ .size mcl_fp_mont3Lbmi2, .Lfunc_end38-mcl_fp_mont3Lbmi2
+
+ .globl mcl_fp_montNF3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF3Lbmi2,@function
+mcl_fp_montNF3Lbmi2: # @mcl_fp_montNF3Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq %rdx, %r10
+ movq %r10, -16(%rsp) # 8-byte Spill
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq (%rsi), %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rdi
+ movq %rdi, -32(%rsp) # 8-byte Spill
+ movq (%r10), %rax
+ movq %rdi, %rdx
+ mulxq %rax, %rbx, %r14
+ movq %rcx, %rdx
+ mulxq %rax, %r15, %r12
+ movq 16(%rsi), %r11
+ addq %rbx, %r12
+ movq %r11, %rdx
+ mulxq %rax, %rsi, %rbx
+ adcq %r14, %rsi
+ adcq $0, %rbx
+ movq -8(%r8), %r9
+ movq (%r8), %r14
+ movq %r15, %rdx
+ imulq %r9, %rdx
+ mulxq %r14, %rbp, %r13
+ addq %r15, %rbp
+ movq 8(%r8), %r15
+ mulxq %r15, %rdi, %rbp
+ adcq %r12, %rdi
+ movq 16(%r8), %r12
+ mulxq %r12, %rax, %r8
+ adcq %rsi, %rax
+ adcq $0, %rbx
+ addq %r13, %rdi
+ movq 8(%r10), %rdx
+ adcq %rbp, %rax
+ adcq %r8, %rbx
+ movq -32(%rsp), %r10 # 8-byte Reload
+ mulxq %r10, %rsi, %r8
+ mulxq %rcx, %r13, %rbp
+ addq %rsi, %rbp
+ mulxq %r11, %rcx, %rsi
+ adcq %r8, %rcx
+ adcq $0, %rsi
+ addq %rdi, %r13
+ adcq %rax, %rbp
+ adcq %rbx, %rcx
+ adcq $0, %rsi
+ movq %r13, %rdx
+ imulq %r9, %rdx
+ mulxq %r14, %rdi, %rbx
+ addq %r13, %rdi
+ mulxq %r15, %rax, %rdi
+ adcq %rbp, %rax
+ mulxq %r12, %rbp, %rdx
+ adcq %rcx, %rbp
+ adcq $0, %rsi
+ addq %rbx, %rax
+ adcq %rdi, %rbp
+ adcq %rdx, %rsi
+ movq -16(%rsp), %rcx # 8-byte Reload
+ movq 16(%rcx), %rdx
+ mulxq %r10, %rbx, %r8
+ mulxq -24(%rsp), %r10, %rdi # 8-byte Folded Reload
+ addq %rbx, %rdi
+ mulxq %r11, %rcx, %rbx
+ adcq %r8, %rcx
+ adcq $0, %rbx
+ addq %rax, %r10
+ adcq %rbp, %rdi
+ adcq %rsi, %rcx
+ adcq $0, %rbx
+ imulq %r10, %r9
+ movq %r9, %rdx
+ mulxq %r14, %rdx, %r8
+ addq %r10, %rdx
+ movq %r9, %rdx
+ mulxq %r12, %rbp, %rsi
+ movq %r9, %rdx
+ mulxq %r15, %rax, %rdx
+ adcq %rdi, %rax
+ adcq %rcx, %rbp
+ adcq $0, %rbx
+ addq %r8, %rax
+ adcq %rdx, %rbp
+ adcq %rsi, %rbx
+ movq %rax, %rcx
+ subq %r14, %rcx
+ movq %rbp, %rdx
+ sbbq %r15, %rdx
+ movq %rbx, %rsi
+ sbbq %r12, %rsi
+ movq %rsi, %rdi
+ sarq $63, %rdi
+ cmovsq %rax, %rcx
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq %rcx, (%rax)
+ cmovsq %rbp, %rdx
+ movq %rdx, 8(%rax)
+ cmovsq %rbx, %rsi
+ movq %rsi, 16(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end39:
+ .size mcl_fp_montNF3Lbmi2, .Lfunc_end39-mcl_fp_montNF3Lbmi2
+
+ .globl mcl_fp_montRed3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed3Lbmi2,@function
+mcl_fp_montRed3Lbmi2: # @mcl_fp_montRed3Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq -8(%rcx), %r15
+ movq (%rcx), %r9
+ movq (%rsi), %rbx
+ movq %rbx, %rdx
+ imulq %r15, %rdx
+ movq 16(%rcx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ mulxq %rax, %r14, %r11
+ movq %rax, %rbp
+ movq 8(%rcx), %r10
+ mulxq %r10, %rax, %r13
+ mulxq %r9, %rdx, %rcx
+ addq %rax, %rcx
+ adcq %r14, %r13
+ adcq $0, %r11
+ movq 40(%rsi), %r14
+ movq 32(%rsi), %r12
+ addq %rbx, %rdx
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r13
+ adcq 24(%rsi), %r11
+ adcq $0, %r12
+ adcq $0, %r14
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rcx, %rdx
+ imulq %r15, %rdx
+ mulxq %rbp, %rbp, %rdi
+ mulxq %r10, %r8, %rbx
+ mulxq %r9, %rdx, %rax
+ addq %r8, %rax
+ adcq %rbp, %rbx
+ adcq $0, %rdi
+ addq %rcx, %rdx
+ adcq %r13, %rax
+ adcq %r11, %rbx
+ adcq %r12, %rdi
+ adcq $0, %r14
+ adcq $0, %rsi
+ imulq %rax, %r15
+ movq %r15, %rdx
+ movq -16(%rsp), %r13 # 8-byte Reload
+ mulxq %r13, %r8, %rcx
+ movq %r15, %rdx
+ mulxq %r10, %r11, %r12
+ movq %r15, %rdx
+ mulxq %r9, %r15, %rdx
+ addq %r11, %rdx
+ adcq %r8, %r12
+ adcq $0, %rcx
+ addq %rax, %r15
+ adcq %rbx, %rdx
+ adcq %rdi, %r12
+ adcq %r14, %rcx
+ adcq $0, %rsi
+ movq %rdx, %rax
+ subq %r9, %rax
+ movq %r12, %rdi
+ sbbq %r10, %rdi
+ movq %rcx, %rbp
+ sbbq %r13, %rbp
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rcx, %rbp
+ testb %sil, %sil
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rcx # 8-byte Reload
+ movq %rax, (%rcx)
+ cmovneq %r12, %rdi
+ movq %rdi, 8(%rcx)
+ movq %rbp, 16(%rcx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end40:
+ .size mcl_fp_montRed3Lbmi2, .Lfunc_end40-mcl_fp_montRed3Lbmi2
+
+ .globl mcl_fp_addPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre3Lbmi2,@function
+mcl_fp_addPre3Lbmi2: # @mcl_fp_addPre3Lbmi2
+# BB#0:
+ movq 16(%rdx), %rax
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rax
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end41:
+ .size mcl_fp_addPre3Lbmi2, .Lfunc_end41-mcl_fp_addPre3Lbmi2
+
+ .globl mcl_fp_subPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre3Lbmi2,@function
+mcl_fp_subPre3Lbmi2: # @mcl_fp_subPre3Lbmi2
+# BB#0:
+ movq 16(%rsi), %r8
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r8
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end42:
+ .size mcl_fp_subPre3Lbmi2, .Lfunc_end42-mcl_fp_subPre3Lbmi2
+
+ .globl mcl_fp_shr1_3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_3Lbmi2,@function
+mcl_fp_shr1_3Lbmi2: # @mcl_fp_shr1_3Lbmi2
+# BB#0:
+ movq 16(%rsi), %rax
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rdx
+ shrdq $1, %rdx, %rcx
+ movq %rcx, (%rdi)
+ shrdq $1, %rax, %rdx
+ movq %rdx, 8(%rdi)
+ shrq %rax
+ movq %rax, 16(%rdi)
+ retq
+.Lfunc_end43:
+ .size mcl_fp_shr1_3Lbmi2, .Lfunc_end43-mcl_fp_shr1_3Lbmi2
+
+ .globl mcl_fp_add3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add3Lbmi2,@function
+mcl_fp_add3Lbmi2: # @mcl_fp_add3Lbmi2
+# BB#0:
+ movq 16(%rdx), %r8
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r8
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB44_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r8, 16(%rdi)
+.LBB44_2: # %carry
+ retq
+.Lfunc_end44:
+ .size mcl_fp_add3Lbmi2, .Lfunc_end44-mcl_fp_add3Lbmi2
+
+ .globl mcl_fp_addNF3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF3Lbmi2,@function
+mcl_fp_addNF3Lbmi2: # @mcl_fp_addNF3Lbmi2
+# BB#0:
+ movq 16(%rdx), %r8
+ movq (%rdx), %r10
+ movq 8(%rdx), %r9
+ addq (%rsi), %r10
+ adcq 8(%rsi), %r9
+ adcq 16(%rsi), %r8
+ movq %r10, %rsi
+ subq (%rcx), %rsi
+ movq %r9, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r8, %rax
+ sbbq 16(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r10, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 16(%rdi)
+ retq
+.Lfunc_end45:
+ .size mcl_fp_addNF3Lbmi2, .Lfunc_end45-mcl_fp_addNF3Lbmi2
+
+ .globl mcl_fp_sub3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub3Lbmi2,@function
+mcl_fp_sub3Lbmi2: # @mcl_fp_sub3Lbmi2
+# BB#0:
+ movq 16(%rsi), %r8
+ movq (%rsi), %rax
+ movq 8(%rsi), %r9
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r9
+ sbbq 16(%rdx), %r8
+ movq %rax, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB46_2
+# BB#1: # %nocarry
+ retq
+.LBB46_2: # %carry
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rsi
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r8, %rsi
+ movq %rsi, 16(%rdi)
+ retq
+.Lfunc_end46:
+ .size mcl_fp_sub3Lbmi2, .Lfunc_end46-mcl_fp_sub3Lbmi2
+
+ .globl mcl_fp_subNF3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF3Lbmi2,@function
+mcl_fp_subNF3Lbmi2: # @mcl_fp_subNF3Lbmi2
+# BB#0:
+ movq 16(%rsi), %r10
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ subq (%rdx), %r8
+ sbbq 8(%rdx), %r9
+ sbbq 16(%rdx), %r10
+ movq %r10, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rsi
+ shldq $1, %r10, %rsi
+ andq (%rcx), %rsi
+ movq 16(%rcx), %rax
+ andq %rdx, %rax
+ andq 8(%rcx), %rdx
+ addq %r8, %rsi
+ movq %rsi, (%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r10, %rax
+ movq %rax, 16(%rdi)
+ retq
+.Lfunc_end47:
+ .size mcl_fp_subNF3Lbmi2, .Lfunc_end47-mcl_fp_subNF3Lbmi2
+
+ .globl mcl_fpDbl_add3Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add3Lbmi2,@function
+mcl_fpDbl_add3Lbmi2: # @mcl_fpDbl_add3Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r10
+ movq 40(%rsi), %r8
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r9
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r14, %r15
+ adcq %r11, %r9
+ adcq %r10, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r15, %rdx
+ subq (%rcx), %rdx
+ movq %r9, %rsi
+ sbbq 8(%rcx), %rsi
+ movq %r8, %rbx
+ sbbq 16(%rcx), %rbx
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %r15, %rdx
+ movq %rdx, 24(%rdi)
+ testb %al, %al
+ cmovneq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ cmovneq %r8, %rbx
+ movq %rbx, 40(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end48:
+ .size mcl_fpDbl_add3Lbmi2, .Lfunc_end48-mcl_fpDbl_add3Lbmi2
+
+ .globl mcl_fpDbl_sub3Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub3Lbmi2,@function
+mcl_fpDbl_sub3Lbmi2: # @mcl_fpDbl_sub3Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r10
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rax
+ xorl %esi, %esi
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rax
+ movq 24(%rdx), %r15
+ movq 32(%rdx), %r12
+ sbbq 16(%rdx), %r14
+ movq %rbx, (%rdi)
+ movq %rax, 8(%rdi)
+ movq %r14, 16(%rdi)
+ sbbq %r15, %r11
+ sbbq %r12, %r9
+ sbbq %r10, %r8
+ movl $0, %eax
+ sbbq $0, %rax
+ andl $1, %eax
+ movq (%rcx), %rdx
+ cmoveq %rsi, %rdx
+ testb %al, %al
+ movq 16(%rcx), %rax
+ cmoveq %rsi, %rax
+ cmovneq 8(%rcx), %rsi
+ addq %r11, %rdx
+ movq %rdx, 24(%rdi)
+ adcq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r8, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end49:
+ .size mcl_fpDbl_sub3Lbmi2, .Lfunc_end49-mcl_fpDbl_sub3Lbmi2
+
+ .globl mcl_fp_mulUnitPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre4Lbmi2,@function
+mcl_fp_mulUnitPre4Lbmi2: # @mcl_fp_mulUnitPre4Lbmi2
+# BB#0:
+ mulxq 24(%rsi), %r8, %r11
+ mulxq 16(%rsi), %r9, %rax
+ mulxq 8(%rsi), %r10, %rcx
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %r10, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r9, %rcx
+ movq %rcx, 16(%rdi)
+ adcq %r8, %rax
+ movq %rax, 24(%rdi)
+ adcq $0, %r11
+ movq %r11, 32(%rdi)
+ retq
+.Lfunc_end50:
+ .size mcl_fp_mulUnitPre4Lbmi2, .Lfunc_end50-mcl_fp_mulUnitPre4Lbmi2
+
+ .globl mcl_fpDbl_mulPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre4Lbmi2,@function
+mcl_fpDbl_mulPre4Lbmi2: # @mcl_fpDbl_mulPre4Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq (%rsi), %r14
+ movq 8(%rsi), %r10
+ movq (%rdx), %rcx
+ movq %rdx, %rbp
+ movq %r14, %rdx
+ mulxq %rcx, %rdx, %r15
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r9
+ movq %rdx, (%rdi)
+ movq %r10, %rdx
+ mulxq %rcx, %rbx, %r12
+ addq %r15, %rbx
+ movq %r9, %rdx
+ mulxq %rcx, %r13, %r15
+ adcq %r12, %r13
+ movq %r11, %rdx
+ mulxq %rcx, %rcx, %r12
+ adcq %r15, %rcx
+ adcq $0, %r12
+ movq 8(%rbp), %rax
+ movq %r14, %rdx
+ mulxq %rax, %r8, %rdx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ addq %rbx, %r8
+ movq %r10, %rdx
+ mulxq %rax, %r15, %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ adcq %r13, %r15
+ movq %r9, %rdx
+ mulxq %rax, %rbx, %r13
+ adcq %rcx, %rbx
+ movq %r11, %rdx
+ mulxq %rax, %rcx, %rax
+ adcq %r12, %rcx
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -8(%rsp), %r15 # 8-byte Folded Reload
+ adcq -16(%rsp), %rbx # 8-byte Folded Reload
+ adcq %r13, %rcx
+ movq %r8, 8(%rdi)
+ adcq %rax, %r12
+ movq %rbp, %r13
+ movq 16(%r13), %rax
+ movq %r14, %rdx
+ mulxq %rax, %rdx, %r8
+ addq %r15, %rdx
+ movq %rdx, 16(%rdi)
+ movq %r10, %rdx
+ mulxq %rax, %rbp, %r10
+ adcq %rbx, %rbp
+ movq %r11, %rdx
+ mulxq %rax, %r14, %r11
+ movq %r9, %rdx
+ mulxq %rax, %r15, %rdx
+ adcq %rcx, %r15
+ adcq %r12, %r14
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r8, %rbp
+ adcq %r10, %r15
+ adcq %rdx, %r14
+ adcq %r11, %rcx
+ movq 24(%r13), %rdx
+ mulxq 24(%rsi), %rbx, %r8
+ mulxq (%rsi), %rax, %r9
+ addq %rbp, %rax
+ mulxq 16(%rsi), %rbp, %r10
+ mulxq 8(%rsi), %rsi, %rdx
+ movq %rax, 24(%rdi)
+ adcq %r15, %rsi
+ adcq %r14, %rbp
+ adcq %rcx, %rbx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %rdx, %rbp
+ movq %rbp, 40(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r8, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end51:
+ .size mcl_fpDbl_mulPre4Lbmi2, .Lfunc_end51-mcl_fpDbl_mulPre4Lbmi2
+
+ .globl mcl_fpDbl_sqrPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre4Lbmi2,@function
+mcl_fpDbl_sqrPre4Lbmi2: # @mcl_fpDbl_sqrPre4Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 24(%rsi), %r8
+ movq 16(%rsi), %r9
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rax
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %r11
+ movq %rdx, (%rdi)
+ movq %r9, %rdx
+ mulxq %rcx, %rbp, %r10
+ movq %rbp, -16(%rsp) # 8-byte Spill
+ movq %r10, -8(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ mulxq %rcx, %r12, %r15
+ addq %r12, %r11
+ movq %r15, %rbx
+ adcq %rbp, %rbx
+ movq %r8, %rdx
+ mulxq %rcx, %rcx, %r13
+ adcq %r10, %rcx
+ adcq $0, %r13
+ addq %r12, %r11
+ movq %rax, %rdx
+ mulxq %rax, %rbp, %r12
+ adcq %rbx, %rbp
+ movq %r8, %rdx
+ mulxq %rax, %r10, %rbx
+ movq %r9, %rdx
+ mulxq %rax, %r14, %rdx
+ adcq %r14, %rcx
+ adcq %r13, %r10
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r15, %rbp
+ adcq %r12, %rcx
+ adcq %rdx, %r10
+ movq %rdx, %r12
+ adcq %rbx, %rax
+ movq %r11, 8(%rdi)
+ addq -16(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rdi)
+ movq %r8, %rdx
+ mulxq %r9, %r11, %r8
+ movq %r9, %rdx
+ mulxq %r9, %r15, %rdx
+ adcq %r14, %rcx
+ adcq %r10, %r15
+ adcq %rax, %r11
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq -8(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r12, %r15
+ adcq %rdx, %r11
+ adcq %r8, %rax
+ movq 24(%rsi), %rdx
+ mulxq 16(%rsi), %rbx, %r8
+ mulxq 8(%rsi), %rbp, %r9
+ mulxq (%rsi), %rsi, %r10
+ addq %rcx, %rsi
+ movq %rsi, 24(%rdi)
+ adcq %r15, %rbp
+ adcq %r11, %rbx
+ mulxq %rdx, %rdx, %rcx
+ adcq %rax, %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r10, %rbp
+ movq %rbp, 32(%rdi)
+ adcq %r9, %rbx
+ movq %rbx, 40(%rdi)
+ adcq %r8, %rdx
+ movq %rdx, 48(%rdi)
+ adcq %rcx, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end52:
+ .size mcl_fpDbl_sqrPre4Lbmi2, .Lfunc_end52-mcl_fpDbl_sqrPre4Lbmi2
+
+ .globl mcl_fp_mont4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont4Lbmi2,@function
+mcl_fp_mont4Lbmi2: # @mcl_fp_mont4Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rdi
+ movq %rdi, -40(%rsp) # 8-byte Spill
+ movq (%rdx), %rax
+ movq %rdi, %rdx
+ mulxq %rax, %r10, %r15
+ movq 16(%rsi), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ mulxq %rax, %rbx, %r11
+ movq (%rsi), %rdi
+ movq %rdi, -56(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rdx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ mulxq %rax, %rbp, %r14
+ movq %rdi, %rdx
+ mulxq %rax, %r13, %r12
+ addq %rbp, %r12
+ adcq %rbx, %r14
+ adcq %r10, %r11
+ adcq $0, %r15
+ movq -8(%rcx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq (%rcx), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ movq %r13, %rdx
+ imulq %rax, %rdx
+ movq 24(%rcx), %rsi
+ movq %rsi, -72(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rbp
+ movq %rbp, -8(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ mulxq %rsi, %r10, %r8
+ mulxq %rbp, %r9, %rbx
+ mulxq %rax, %rsi, %rcx
+ mulxq %rdi, %rdx, %rbp
+ addq %rsi, %rbp
+ adcq %r9, %rcx
+ adcq %r10, %rbx
+ adcq $0, %r8
+ addq %r13, %rdx
+ adcq %r12, %rbp
+ adcq %r14, %rcx
+ adcq %r11, %rbx
+ adcq %r15, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq 8(%rdx), %rdx
+ mulxq -40(%rsp), %r12, %r14 # 8-byte Folded Reload
+ mulxq -48(%rsp), %r15, %r11 # 8-byte Folded Reload
+ mulxq -64(%rsp), %r9, %rdi # 8-byte Folded Reload
+ mulxq -56(%rsp), %r10, %rsi # 8-byte Folded Reload
+ addq %r9, %rsi
+ adcq %r15, %rdi
+ adcq %r12, %r11
+ adcq $0, %r14
+ addq %rbp, %r10
+ adcq %rcx, %rsi
+ adcq %rbx, %rdi
+ adcq %r8, %r11
+ adcq %rax, %r14
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ movq %r10, %rdx
+ imulq -16(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -72(%rsp), %r15, %r9 # 8-byte Folded Reload
+ mulxq -8(%rsp), %r12, %r8 # 8-byte Folded Reload
+ movq -80(%rsp), %r13 # 8-byte Reload
+ mulxq %r13, %rbp, %rcx
+ mulxq -24(%rsp), %rdx, %rax # 8-byte Folded Reload
+ addq %rbp, %rax
+ adcq %r12, %rcx
+ adcq %r15, %r8
+ adcq $0, %r9
+ addq %r10, %rdx
+ adcq %rsi, %rax
+ adcq %rdi, %rcx
+ adcq %r11, %r8
+ adcq %r14, %r9
+ adcq $0, %rbx
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq -40(%rsp), %r15, %r11 # 8-byte Folded Reload
+ mulxq -48(%rsp), %r12, %r14 # 8-byte Folded Reload
+ mulxq -64(%rsp), %rsi, %rbp # 8-byte Folded Reload
+ mulxq -56(%rsp), %r10, %rdi # 8-byte Folded Reload
+ addq %rsi, %rdi
+ adcq %r12, %rbp
+ adcq %r15, %r14
+ adcq $0, %r11
+ addq %rax, %r10
+ adcq %rcx, %rdi
+ adcq %r8, %rbp
+ adcq %r9, %r14
+ adcq %rbx, %r11
+ sbbq %rbx, %rbx
+ movq %r10, %rdx
+ imulq -16(%rsp), %rdx # 8-byte Folded Reload
+ mulxq %r13, %rcx, %rsi
+ mulxq -24(%rsp), %r8, %rax # 8-byte Folded Reload
+ addq %rcx, %rax
+ mulxq -8(%rsp), %rcx, %r15 # 8-byte Folded Reload
+ adcq %rsi, %rcx
+ movq -72(%rsp), %r13 # 8-byte Reload
+ mulxq %r13, %r9, %rsi
+ adcq %r15, %r9
+ adcq $0, %rsi
+ andl $1, %ebx
+ addq %r10, %r8
+ adcq %rdi, %rax
+ adcq %rbp, %rcx
+ adcq %r14, %r9
+ adcq %r11, %rsi
+ adcq $0, %rbx
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq 24(%rdx), %rdx
+ mulxq -40(%rsp), %r11, %r8 # 8-byte Folded Reload
+ mulxq -48(%rsp), %r15, %rdi # 8-byte Folded Reload
+ mulxq -64(%rsp), %r12, %r14 # 8-byte Folded Reload
+ mulxq -56(%rsp), %r10, %rbp # 8-byte Folded Reload
+ addq %r12, %rbp
+ adcq %r15, %r14
+ adcq %r11, %rdi
+ adcq $0, %r8
+ addq %rax, %r10
+ adcq %rcx, %rbp
+ adcq %r9, %r14
+ adcq %rsi, %rdi
+ adcq %rbx, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq -16(%rsp), %rdx # 8-byte Reload
+ imulq %r10, %rdx
+ mulxq %r13, %rcx, %rsi
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ mulxq -8(%rsp), %r11, %rbx # 8-byte Folded Reload
+ mulxq -80(%rsp), %r15, %rcx # 8-byte Folded Reload
+ movq -24(%rsp), %r9 # 8-byte Reload
+ mulxq %r9, %r12, %r13
+ addq %r15, %r13
+ adcq %r11, %rcx
+ adcq -16(%rsp), %rbx # 8-byte Folded Reload
+ adcq $0, %rsi
+ addq %r10, %r12
+ adcq %rbp, %r13
+ adcq %r14, %rcx
+ adcq %rdi, %rbx
+ adcq %r8, %rsi
+ adcq $0, %rax
+ movq %r13, %rdi
+ subq %r9, %rdi
+ movq %rcx, %rbp
+ sbbq -80(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbx, %r8
+ sbbq -8(%rsp), %r8 # 8-byte Folded Reload
+ movq %rsi, %rdx
+ sbbq -72(%rsp), %rdx # 8-byte Folded Reload
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rsi, %rdx
+ testb %al, %al
+ cmovneq %r13, %rdi
+ movq -88(%rsp), %rax # 8-byte Reload
+ movq %rdi, (%rax)
+ cmovneq %rcx, %rbp
+ movq %rbp, 8(%rax)
+ cmovneq %rbx, %r8
+ movq %r8, 16(%rax)
+ movq %rdx, 24(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end53:
+ .size mcl_fp_mont4Lbmi2, .Lfunc_end53-mcl_fp_mont4Lbmi2
+
+ .globl mcl_fp_montNF4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF4Lbmi2,@function
+mcl_fp_montNF4Lbmi2: # @mcl_fp_montNF4Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ movq (%rsi), %rdi
+ movq %rdi, -64(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rbp
+ movq %rbp, -72(%rsp) # 8-byte Spill
+ movq (%rdx), %rax
+ movq %rdx, %r15
+ movq %rbp, %rdx
+ mulxq %rax, %rbp, %r9
+ movq %rdi, %rdx
+ mulxq %rax, %r12, %rbx
+ movq 16(%rsi), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ addq %rbp, %rbx
+ mulxq %rax, %r14, %rbp
+ adcq %r9, %r14
+ movq 24(%rsi), %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ mulxq %rax, %r8, %rdi
+ adcq %rbp, %r8
+ adcq $0, %rdi
+ movq -8(%rcx), %r13
+ movq (%rcx), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %r12, %rdx
+ imulq %r13, %rdx
+ mulxq %rax, %rax, %r11
+ addq %r12, %rax
+ movq 8(%rcx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulxq %rax, %rbp, %r10
+ adcq %rbx, %rbp
+ movq 16(%rcx), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ mulxq %rax, %rsi, %rbx
+ adcq %r14, %rsi
+ movq 24(%rcx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ mulxq %rax, %rcx, %rdx
+ adcq %r8, %rcx
+ adcq $0, %rdi
+ addq %r11, %rbp
+ adcq %r10, %rsi
+ adcq %rbx, %rcx
+ adcq %rdx, %rdi
+ movq 8(%r15), %rdx
+ movq -72(%rsp), %r12 # 8-byte Reload
+ mulxq %r12, %rbx, %r9
+ movq -64(%rsp), %r15 # 8-byte Reload
+ mulxq %r15, %r10, %r11
+ addq %rbx, %r11
+ mulxq -48(%rsp), %rax, %r8 # 8-byte Folded Reload
+ adcq %r9, %rax
+ mulxq -16(%rsp), %r9, %rbx # 8-byte Folded Reload
+ adcq %r8, %r9
+ adcq $0, %rbx
+ addq %rbp, %r10
+ adcq %rsi, %r11
+ adcq %rcx, %rax
+ adcq %rdi, %r9
+ adcq $0, %rbx
+ movq %r10, %rdx
+ imulq %r13, %rdx
+ movq -56(%rsp), %r14 # 8-byte Reload
+ mulxq %r14, %rcx, %r8
+ addq %r10, %rcx
+ mulxq -24(%rsp), %r10, %rdi # 8-byte Folded Reload
+ adcq %r11, %r10
+ mulxq -40(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ adcq %rax, %rcx
+ mulxq -8(%rsp), %rax, %rdx # 8-byte Folded Reload
+ adcq %r9, %rax
+ adcq $0, %rbx
+ addq %r8, %r10
+ adcq %rdi, %rcx
+ adcq %rsi, %rax
+ adcq %rdx, %rbx
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq %r12, %rsi, %r8
+ mulxq %r15, %r11, %rbp
+ addq %rsi, %rbp
+ movq -48(%rsp), %r12 # 8-byte Reload
+ mulxq %r12, %rdi, %r9
+ adcq %r8, %rdi
+ mulxq -16(%rsp), %r8, %rsi # 8-byte Folded Reload
+ adcq %r9, %r8
+ adcq $0, %rsi
+ addq %r10, %r11
+ adcq %rcx, %rbp
+ adcq %rax, %rdi
+ adcq %rbx, %r8
+ adcq $0, %rsi
+ movq %r11, %rdx
+ imulq %r13, %rdx
+ mulxq %r14, %rax, %r10
+ addq %r11, %rax
+ movq -24(%rsp), %r14 # 8-byte Reload
+ mulxq %r14, %r9, %rbx
+ adcq %rbp, %r9
+ movq -40(%rsp), %r15 # 8-byte Reload
+ mulxq %r15, %rax, %rbp
+ adcq %rdi, %rax
+ mulxq -8(%rsp), %rcx, %rdx # 8-byte Folded Reload
+ adcq %r8, %rcx
+ adcq $0, %rsi
+ addq %r10, %r9
+ adcq %rbx, %rax
+ adcq %rbp, %rcx
+ adcq %rdx, %rsi
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq 24(%rdx), %rdx
+ mulxq -72(%rsp), %rbx, %r8 # 8-byte Folded Reload
+ mulxq -64(%rsp), %r11, %rbp # 8-byte Folded Reload
+ addq %rbx, %rbp
+ mulxq %r12, %rdi, %r10
+ adcq %r8, %rdi
+ mulxq -16(%rsp), %r8, %rbx # 8-byte Folded Reload
+ adcq %r10, %r8
+ adcq $0, %rbx
+ addq %r9, %r11
+ adcq %rax, %rbp
+ adcq %rcx, %rdi
+ adcq %rsi, %r8
+ adcq $0, %rbx
+ imulq %r11, %r13
+ movq %r13, %rdx
+ movq -56(%rsp), %r12 # 8-byte Reload
+ mulxq %r12, %rcx, %r9
+ addq %r11, %rcx
+ movq %r13, %rdx
+ mulxq %r14, %r11, %r10
+ adcq %rbp, %r11
+ movq %r13, %rdx
+ movq %r15, %rsi
+ mulxq %rsi, %rax, %rcx
+ adcq %rdi, %rax
+ movq %r13, %rdx
+ movq -8(%rsp), %rbp # 8-byte Reload
+ mulxq %rbp, %r15, %rdx
+ adcq %r8, %r15
+ adcq $0, %rbx
+ addq %r9, %r11
+ adcq %r10, %rax
+ adcq %rcx, %r15
+ adcq %rdx, %rbx
+ movq %r11, %rcx
+ subq %r12, %rcx
+ movq %rax, %rdx
+ sbbq %r14, %rdx
+ movq %r15, %rdi
+ sbbq %rsi, %rdi
+ movq %rbx, %rsi
+ sbbq %rbp, %rsi
+ cmovsq %r11, %rcx
+ movq -80(%rsp), %rbp # 8-byte Reload
+ movq %rcx, (%rbp)
+ cmovsq %rax, %rdx
+ movq %rdx, 8(%rbp)
+ cmovsq %r15, %rdi
+ movq %rdi, 16(%rbp)
+ cmovsq %rbx, %rsi
+ movq %rsi, 24(%rbp)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end54:
+ .size mcl_fp_montNF4Lbmi2, .Lfunc_end54-mcl_fp_montNF4Lbmi2
+
+ .globl mcl_fp_montRed4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed4Lbmi2,@function
+mcl_fp_montRed4Lbmi2: # @mcl_fp_montRed4Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -56(%rsp) # 8-byte Spill
+ movq -8(%rcx), %r13
+ movq (%rcx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq (%rsi), %r10
+ movq %r10, %rdx
+ imulq %r13, %rdx
+ movq 24(%rcx), %rdi
+ movq %rdi, -48(%rsp) # 8-byte Spill
+ mulxq %rdi, %r9, %r15
+ movq %rdi, %r14
+ movq 16(%rcx), %rdi
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ mulxq %rdi, %rdi, %rbx
+ movq 8(%rcx), %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ mulxq %rcx, %rcx, %r8
+ mulxq %rax, %rdx, %rbp
+ addq %rcx, %rbp
+ adcq %rdi, %r8
+ adcq %r9, %rbx
+ adcq $0, %r15
+ movq 56(%rsi), %r11
+ movq 48(%rsi), %rcx
+ addq %r10, %rdx
+ movq 40(%rsi), %r12
+ adcq 8(%rsi), %rbp
+ adcq 16(%rsi), %r8
+ adcq 24(%rsi), %rbx
+ adcq 32(%rsi), %r15
+ adcq $0, %r12
+ adcq $0, %rcx
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ adcq $0, %r11
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rbp, %rdx
+ imulq %r13, %rdx
+ mulxq %r14, %rax, %r9
+ movq %rax, -40(%rsp) # 8-byte Spill
+ mulxq -8(%rsp), %r14, %rdi # 8-byte Folded Reload
+ mulxq -16(%rsp), %r10, %rcx # 8-byte Folded Reload
+ mulxq -24(%rsp), %rdx, %rax # 8-byte Folded Reload
+ addq %r10, %rax
+ adcq %r14, %rcx
+ adcq -40(%rsp), %rdi # 8-byte Folded Reload
+ adcq $0, %r9
+ addq %rbp, %rdx
+ adcq %r8, %rax
+ adcq %rbx, %rcx
+ adcq %r15, %rdi
+ adcq %r12, %r9
+ adcq $0, -32(%rsp) # 8-byte Folded Spill
+ adcq $0, %r11
+ movq %r11, -40(%rsp) # 8-byte Spill
+ adcq $0, %rsi
+ movq %rax, %rdx
+ imulq %r13, %rdx
+ movq -48(%rsp), %r15 # 8-byte Reload
+ mulxq %r15, %rbp, %r8
+ movq %rbp, -64(%rsp) # 8-byte Spill
+ movq -8(%rsp), %r11 # 8-byte Reload
+ mulxq %r11, %rbx, %r10
+ movq %rbx, -72(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %r12, %rbp # 8-byte Folded Reload
+ movq -24(%rsp), %r14 # 8-byte Reload
+ mulxq %r14, %rdx, %rbx
+ addq %r12, %rbx
+ adcq -72(%rsp), %rbp # 8-byte Folded Reload
+ adcq -64(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r8
+ addq %rax, %rdx
+ adcq %rcx, %rbx
+ adcq %rdi, %rbp
+ adcq %r9, %r10
+ adcq -32(%rsp), %r8 # 8-byte Folded Reload
+ adcq $0, -40(%rsp) # 8-byte Folded Spill
+ adcq $0, %rsi
+ imulq %rbx, %r13
+ movq %r13, %rdx
+ mulxq %r15, %rax, %rdi
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq %r13, %rdx
+ mulxq %r11, %r9, %rax
+ movq %r13, %rdx
+ movq -16(%rsp), %r11 # 8-byte Reload
+ mulxq %r11, %r12, %rcx
+ movq %r13, %rdx
+ mulxq %r14, %r15, %r13
+ addq %r12, %r13
+ adcq %r9, %rcx
+ adcq -32(%rsp), %rax # 8-byte Folded Reload
+ adcq $0, %rdi
+ addq %rbx, %r15
+ adcq %rbp, %r13
+ adcq %r10, %rcx
+ adcq %r8, %rax
+ adcq -40(%rsp), %rdi # 8-byte Folded Reload
+ adcq $0, %rsi
+ movq %r13, %rdx
+ subq %r14, %rdx
+ movq %rcx, %rbp
+ sbbq %r11, %rbp
+ movq %rax, %r8
+ sbbq -8(%rsp), %r8 # 8-byte Folded Reload
+ movq %rdi, %rbx
+ sbbq -48(%rsp), %rbx # 8-byte Folded Reload
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rdi, %rbx
+ testb %sil, %sil
+ cmovneq %r13, %rdx
+ movq -56(%rsp), %rsi # 8-byte Reload
+ movq %rdx, (%rsi)
+ cmovneq %rcx, %rbp
+ movq %rbp, 8(%rsi)
+ cmovneq %rax, %r8
+ movq %r8, 16(%rsi)
+ movq %rbx, 24(%rsi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end55:
+ .size mcl_fp_montRed4Lbmi2, .Lfunc_end55-mcl_fp_montRed4Lbmi2
+
+ .globl mcl_fp_addPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre4Lbmi2,@function
+mcl_fp_addPre4Lbmi2: # @mcl_fp_addPre4Lbmi2
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rdx), %rax
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rax
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ adcq %r8, %r9
+ movq %r9, 24(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end56:
+ .size mcl_fp_addPre4Lbmi2, .Lfunc_end56-mcl_fp_addPre4Lbmi2
+
+ .globl mcl_fp_subPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre4Lbmi2,@function
+mcl_fp_subPre4Lbmi2: # @mcl_fp_subPre4Lbmi2
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r10, 16(%rdi)
+ sbbq %r8, %r9
+ movq %r9, 24(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end57:
+ .size mcl_fp_subPre4Lbmi2, .Lfunc_end57-mcl_fp_subPre4Lbmi2
+
+ .globl mcl_fp_shr1_4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_4Lbmi2,@function
+mcl_fp_shr1_4Lbmi2: # @mcl_fp_shr1_4Lbmi2
+# BB#0:
+ movq 24(%rsi), %rax
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rdx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rdx
+ movq %rdx, (%rdi)
+ shrdq $1, %rcx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rax, %rcx
+ movq %rcx, 16(%rdi)
+ shrq %rax
+ movq %rax, 24(%rdi)
+ retq
+.Lfunc_end58:
+ .size mcl_fp_shr1_4Lbmi2, .Lfunc_end58-mcl_fp_shr1_4Lbmi2
+
+ .globl mcl_fp_add4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add4Lbmi2,@function
+mcl_fp_add4Lbmi2: # @mcl_fp_add4Lbmi2
+# BB#0:
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %r8
+ movq 16(%rdx), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r9
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r9, 16(%rdi)
+ adcq %r10, %r8
+ movq %r8, 24(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r9
+ sbbq 24(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB59_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r9, 16(%rdi)
+ movq %r8, 24(%rdi)
+.LBB59_2: # %carry
+ retq
+.Lfunc_end59:
+ .size mcl_fp_add4Lbmi2, .Lfunc_end59-mcl_fp_add4Lbmi2
+
+ .globl mcl_fp_addNF4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF4Lbmi2,@function
+mcl_fp_addNF4Lbmi2: # @mcl_fp_addNF4Lbmi2
+# BB#0:
+ pushq %rbx
+ movq 24(%rdx), %r8
+ movq 16(%rdx), %r9
+ movq (%rdx), %r11
+ movq 8(%rdx), %r10
+ addq (%rsi), %r11
+ adcq 8(%rsi), %r10
+ adcq 16(%rsi), %r9
+ adcq 24(%rsi), %r8
+ movq %r11, %rsi
+ subq (%rcx), %rsi
+ movq %r10, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r9, %rax
+ sbbq 16(%rcx), %rax
+ movq %r8, %rbx
+ sbbq 24(%rcx), %rbx
+ testq %rbx, %rbx
+ cmovsq %r11, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r10, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r9, %rax
+ movq %rax, 16(%rdi)
+ cmovsq %r8, %rbx
+ movq %rbx, 24(%rdi)
+ popq %rbx
+ retq
+.Lfunc_end60:
+ .size mcl_fp_addNF4Lbmi2, .Lfunc_end60-mcl_fp_addNF4Lbmi2
+
+ .globl mcl_fp_sub4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub4Lbmi2,@function
+mcl_fp_sub4Lbmi2: # @mcl_fp_sub4Lbmi2
+# BB#0:
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %r8
+ movq 16(%rsi), %r9
+ movq (%rsi), %rax
+ movq 8(%rsi), %r11
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r9
+ movq %rax, (%rdi)
+ movq %r11, 8(%rdi)
+ movq %r9, 16(%rdi)
+ sbbq %r10, %r8
+ movq %r8, 24(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB61_2
+# BB#1: # %nocarry
+ retq
+.LBB61_2: # %carry
+ movq 24(%rcx), %r10
+ movq 8(%rcx), %rsi
+ movq 16(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r8, %r10
+ movq %r10, 24(%rdi)
+ retq
+.Lfunc_end61:
+ .size mcl_fp_sub4Lbmi2, .Lfunc_end61-mcl_fp_sub4Lbmi2
+
+ .globl mcl_fp_subNF4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF4Lbmi2,@function
+mcl_fp_subNF4Lbmi2: # @mcl_fp_subNF4Lbmi2
+# BB#0:
+ pushq %rbx
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r8
+ movq (%rsi), %r9
+ movq 8(%rsi), %r10
+ subq (%rdx), %r9
+ sbbq 8(%rdx), %r10
+ sbbq 16(%rdx), %r8
+ sbbq 24(%rdx), %r11
+ movq %r11, %rdx
+ sarq $63, %rdx
+ movq 24(%rcx), %rsi
+ andq %rdx, %rsi
+ movq 16(%rcx), %rax
+ andq %rdx, %rax
+ movq 8(%rcx), %rbx
+ andq %rdx, %rbx
+ andq (%rcx), %rdx
+ addq %r9, %rdx
+ movq %rdx, (%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r8, %rax
+ movq %rax, 16(%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ popq %rbx
+ retq
+.Lfunc_end62:
+ .size mcl_fp_subNF4Lbmi2, .Lfunc_end62-mcl_fp_subNF4Lbmi2
+
+ .globl mcl_fpDbl_add4Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add4Lbmi2,@function
+mcl_fpDbl_add4Lbmi2: # @mcl_fpDbl_add4Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r9
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r10
+ movq 48(%rsi), %r12
+ movq 40(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq 24(%rdx), %r15
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %rsi
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r15, %rbp
+ movq %rbp, 24(%rdi)
+ adcq %r14, %rsi
+ adcq %r11, %r13
+ adcq %r10, %r12
+ adcq %r9, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rsi, %rdx
+ subq (%rcx), %rdx
+ movq %r13, %rbp
+ sbbq 8(%rcx), %rbp
+ movq %r12, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r8, %r9
+ sbbq 24(%rcx), %r9
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rsi, %rdx
+ movq %rdx, 32(%rdi)
+ testb %al, %al
+ cmovneq %r13, %rbp
+ movq %rbp, 40(%rdi)
+ cmovneq %r12, %rbx
+ movq %rbx, 48(%rdi)
+ cmovneq %r8, %r9
+ movq %r9, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end63:
+ .size mcl_fpDbl_add4Lbmi2, .Lfunc_end63-mcl_fpDbl_add4Lbmi2
+
+ .globl mcl_fpDbl_sub4Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub4Lbmi2,@function
+mcl_fpDbl_sub4Lbmi2: # @mcl_fpDbl_sub4Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r9
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq (%rsi), %rbx
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ movq %rbx, (%rdi)
+ movq 8(%rsi), %rbx
+ sbbq 8(%rdx), %rbx
+ movq %rbx, 8(%rdi)
+ movq 16(%rsi), %rbx
+ sbbq 16(%rdx), %rbx
+ movq %rbx, 16(%rdi)
+ movq 24(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 40(%rdx), %r11
+ movq 32(%rdx), %rdx
+ movq %rbx, 24(%rdi)
+ movq 32(%rsi), %r12
+ sbbq %rdx, %r12
+ movq 48(%rsi), %r14
+ movq 40(%rsi), %r15
+ sbbq %r11, %r15
+ sbbq %r10, %r14
+ sbbq %r9, %r8
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ movq 16(%rcx), %rdx
+ cmoveq %rax, %rdx
+ movq 24(%rcx), %rbx
+ cmoveq %rax, %rbx
+ cmovneq 8(%rcx), %rax
+ addq %r12, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r15, %rax
+ movq %rax, 40(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 48(%rdi)
+ adcq %r8, %rbx
+ movq %rbx, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end64:
+ .size mcl_fpDbl_sub4Lbmi2, .Lfunc_end64-mcl_fpDbl_sub4Lbmi2
+
+ .globl mcl_fp_mulUnitPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre5Lbmi2,@function
+mcl_fp_mulUnitPre5Lbmi2: # @mcl_fp_mulUnitPre5Lbmi2
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ mulxq 32(%rsi), %r8, %r11
+ mulxq 24(%rsi), %r9, %rax
+ mulxq 16(%rsi), %r10, %rcx
+ mulxq 8(%rsi), %r14, %rbx
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %r14, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r9, %rcx
+ movq %rcx, 24(%rdi)
+ adcq %r8, %rax
+ movq %rax, 32(%rdi)
+ adcq $0, %r11
+ movq %r11, 40(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end65:
+ .size mcl_fp_mulUnitPre5Lbmi2, .Lfunc_end65-mcl_fp_mulUnitPre5Lbmi2
+
+ .globl mcl_fpDbl_mulPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre5Lbmi2,@function
+mcl_fpDbl_mulPre5Lbmi2: # @mcl_fpDbl_mulPre5Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq (%rsi), %r11
+ movq 8(%rsi), %r10
+ movq (%rdx), %rcx
+ movq %r10, %rdx
+ mulxq %rcx, %rax, %r14
+ movq %r11, %rdx
+ mulxq %rcx, %rdx, %rbx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rbp
+ movq %rbp, -16(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r15
+ addq %rax, %rbx
+ movq %r15, %rdx
+ mulxq %rcx, %rax, %r13
+ adcq %r14, %rax
+ movq %rbp, %rdx
+ mulxq %rcx, %r8, %r12
+ adcq %r13, %r8
+ movq 32(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rcx, %r9, %r13
+ adcq %r12, %r9
+ movq -24(%rsp), %rcx # 8-byte Reload
+ movq %rcx, (%rdi)
+ adcq $0, %r13
+ movq -48(%rsp), %rdi # 8-byte Reload
+ movq 8(%rdi), %rbp
+ movq %r11, %rdx
+ mulxq %rbp, %r12, %r11
+ addq %rbx, %r12
+ movq %r10, %rdx
+ mulxq %rbp, %rbx, %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ adcq %rax, %rbx
+ movq %r15, %rdx
+ mulxq %rbp, %rcx, %r10
+ adcq %r8, %rcx
+ movq -16(%rsp), %rdx # 8-byte Reload
+ mulxq %rbp, %rax, %r8
+ adcq %r9, %rax
+ movq %r14, %rdx
+ mulxq %rbp, %r15, %rdx
+ adcq %r13, %r15
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq %r11, %rbx
+ movq -8(%rsp), %rbp # 8-byte Reload
+ movq %r12, 8(%rbp)
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r10, %rax
+ adcq %r8, %r15
+ adcq %rdx, %r14
+ movq (%rsi), %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq 8(%rsi), %r8
+ movq %r8, -16(%rsp) # 8-byte Spill
+ movq 16(%rdi), %rbp
+ mulxq %rbp, %r12, %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ addq %rbx, %r12
+ movq %r8, %rdx
+ mulxq %rbp, %rbx, %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ adcq %rcx, %rbx
+ movq 16(%rsi), %r11
+ movq %r11, %rdx
+ mulxq %rbp, %rcx, %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ adcq %rax, %rcx
+ movq 24(%rsi), %r13
+ movq %r13, %rdx
+ mulxq %rbp, %r9, %r10
+ adcq %r15, %r9
+ movq 32(%rsi), %r15
+ movq %r15, %rdx
+ mulxq %rbp, %r8, %rdx
+ adcq %r14, %r8
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq -32(%rsp), %rbx # 8-byte Folded Reload
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ adcq -56(%rsp), %r9 # 8-byte Folded Reload
+ adcq %r10, %r8
+ adcq %rdx, %r14
+ movq -8(%rsp), %r10 # 8-byte Reload
+ movq %r12, 16(%r10)
+ movq %rdi, %rbp
+ movq 24(%rbp), %rax
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r12, %rdi
+ addq %rbx, %r12
+ movq -16(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %rbx, %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ adcq %rcx, %rbx
+ movq %r11, %rdx
+ mulxq %rax, %rcx, %r11
+ adcq %r9, %rcx
+ movq %r13, %rdx
+ mulxq %rax, %r13, %r9
+ adcq %r8, %r13
+ movq %r15, %rdx
+ mulxq %rax, %r8, %rdx
+ adcq %r14, %r8
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq %rdi, %rbx
+ movq %r12, 24(%r10)
+ movq %r10, %rdi
+ adcq -16(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r11, %r13
+ adcq %r9, %r8
+ adcq %rdx, %r14
+ movq 32(%rbp), %rdx
+ mulxq 8(%rsi), %rax, %r9
+ mulxq (%rsi), %rbp, %r10
+ addq %rbx, %rbp
+ adcq %rcx, %rax
+ mulxq 16(%rsi), %rbx, %r15
+ adcq %r13, %rbx
+ mulxq 32(%rsi), %rcx, %r11
+ mulxq 24(%rsi), %rsi, %rdx
+ movq %rbp, 32(%rdi)
+ adcq %r8, %rsi
+ adcq %r14, %rcx
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq %r10, %rax
+ movq %rax, 40(%rdi)
+ adcq %r9, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r15, %rsi
+ movq %rsi, 56(%rdi)
+ adcq %rdx, %rcx
+ movq %rcx, 64(%rdi)
+ adcq %r11, %rbp
+ movq %rbp, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end66:
+ .size mcl_fpDbl_mulPre5Lbmi2, .Lfunc_end66-mcl_fpDbl_mulPre5Lbmi2
+
+ .globl mcl_fpDbl_sqrPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre5Lbmi2,@function
+mcl_fpDbl_sqrPre5Lbmi2: # @mcl_fpDbl_sqrPre5Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 16(%rsi), %r11
+ movq (%rsi), %rax
+ movq 8(%rsi), %rcx
+ movq %r11, %rdx
+ mulxq %rax, %rbx, %r15
+ movq 32(%rsi), %r9
+ movq %r9, -8(%rsp) # 8-byte Spill
+ movq 24(%rsi), %r13
+ movq %rcx, %rdx
+ mulxq %rax, %r12, %rbp
+ movq %rbp, -16(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ mulxq %rax, %rdx, %r14
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ addq %r12, %r14
+ adcq %rbp, %rbx
+ movq %r13, %rdx
+ mulxq %rax, %r8, %r10
+ adcq %r15, %r8
+ movq %r9, %rdx
+ mulxq %rax, %rbp, %r15
+ adcq %r10, %rbp
+ movq -24(%rsp), %rax # 8-byte Reload
+ movq %rax, (%rdi)
+ adcq $0, %r15
+ addq %r12, %r14
+ movq %rcx, %rdx
+ mulxq %rcx, %rax, %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq %rbx, %rax
+ movq %r11, %rdx
+ mulxq %rcx, %rbx, %r10
+ adcq %r8, %rbx
+ movq %r13, %rdx
+ mulxq %rcx, %r13, %r8
+ adcq %rbp, %r13
+ movq %r9, %rdx
+ mulxq %rcx, %r12, %rcx
+ adcq %r15, %r12
+ sbbq %r15, %r15
+ andl $1, %r15d
+ addq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %r14, 8(%rdi)
+ adcq -24(%rsp), %rbx # 8-byte Folded Reload
+ adcq %r10, %r13
+ adcq %r8, %r12
+ adcq %rcx, %r15
+ movq (%rsi), %r9
+ movq 8(%rsi), %r10
+ movq %r9, %rdx
+ mulxq %r11, %rbp, %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ addq %rax, %rbp
+ movq %r10, %rdx
+ mulxq %r11, %rax, %r8
+ adcq %rbx, %rax
+ movq %r11, %rdx
+ mulxq %r11, %r14, %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ adcq %r13, %r14
+ movq 24(%rsi), %rcx
+ movq %rcx, %rdx
+ mulxq %r11, %rbx, %r13
+ adcq %r12, %rbx
+ movq -8(%rsp), %rdx # 8-byte Reload
+ mulxq %r11, %r12, %rdx
+ adcq %r15, %r12
+ sbbq %r15, %r15
+ andl $1, %r15d
+ addq -16(%rsp), %rax # 8-byte Folded Reload
+ adcq %r8, %r14
+ movq %rbp, 16(%rdi)
+ adcq -24(%rsp), %rbx # 8-byte Folded Reload
+ adcq %r13, %r12
+ adcq %rdx, %r15
+ movq %r10, %rdx
+ mulxq %rcx, %r10, %rdx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %r9, %rdx
+ mulxq %rcx, %r13, %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ addq %rax, %r13
+ movq 16(%rsi), %r8
+ movq 32(%rsi), %rax
+ adcq %r14, %r10
+ movq %r8, %rdx
+ mulxq %rcx, %r9, %r14
+ adcq %rbx, %r9
+ movq %rcx, %rdx
+ mulxq %rcx, %r11, %rbp
+ adcq %r12, %r11
+ movq %rax, %rdx
+ mulxq %rcx, %r12, %rdx
+ adcq %r15, %r12
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq -16(%rsp), %r10 # 8-byte Folded Reload
+ movq %r13, 24(%rdi)
+ adcq -8(%rsp), %r9 # 8-byte Folded Reload
+ adcq %r14, %r11
+ adcq %rbp, %r12
+ adcq %rdx, %rbx
+ movq %rax, %rdx
+ mulxq 24(%rsi), %rbp, %r14
+ movq %rax, %rdx
+ mulxq (%rsi), %rcx, %r15
+ addq %r10, %rcx
+ movq %rax, %rdx
+ mulxq 8(%rsi), %rsi, %r10
+ movq %rcx, 32(%rdi)
+ adcq %r9, %rsi
+ movq %r8, %rdx
+ mulxq %rax, %rcx, %r8
+ adcq %r11, %rcx
+ adcq %r12, %rbp
+ movq %rax, %rdx
+ mulxq %rax, %rdx, %rax
+ adcq %rbx, %rdx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq %r15, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r10, %rcx
+ movq %rcx, 48(%rdi)
+ adcq %r8, %rbp
+ movq %rbp, 56(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 64(%rdi)
+ adcq %rax, %rbx
+ movq %rbx, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end67:
+ .size mcl_fpDbl_sqrPre5Lbmi2, .Lfunc_end67-mcl_fpDbl_sqrPre5Lbmi2
+
+ .globl mcl_fp_mont5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont5Lbmi2,@function
+mcl_fp_mont5Lbmi2: # @mcl_fp_mont5Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rdi, -112(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rdi
+ movq %rdi, -64(%rsp) # 8-byte Spill
+ movq (%rdx), %rax
+ movq %rdi, %rdx
+ mulxq %rax, %r10, %rbx
+ movq 24(%rsi), %rdx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ mulxq %rax, %r12, %r14
+ movq 16(%rsi), %rdx
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ mulxq %rax, %r13, %r11
+ movq (%rsi), %rbp
+ movq %rbp, -88(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rdx
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ mulxq %rax, %rdi, %r9
+ movq %rbp, %rdx
+ mulxq %rax, %r15, %r8
+ addq %rdi, %r8
+ adcq %r13, %r9
+ adcq %r12, %r11
+ adcq %r10, %r14
+ adcq $0, %rbx
+ movq %rbx, -104(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %r15, %rdx
+ imulq %rax, %rdx
+ movq (%rcx), %rsi
+ movq %rsi, -32(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ mulxq %rax, %rax, %r12
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ mulxq %rax, %r13, %r10
+ movq 8(%rcx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulxq %rax, %rdi, %rbp
+ mulxq %rsi, %rax, %rbx
+ addq %rdi, %rbx
+ movq 16(%rcx), %rcx
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ mulxq %rcx, %rdi, %rcx
+ adcq %rbp, %rdi
+ adcq %r13, %rcx
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r12
+ addq %r15, %rax
+ adcq %r8, %rbx
+ adcq %r9, %rdi
+ adcq %r11, %rcx
+ adcq %r14, %r10
+ adcq -104(%rsp), %r12 # 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq -56(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ mulxq -64(%rsp), %rax, %r14 # 8-byte Folded Reload
+ movq %rax, -104(%rsp) # 8-byte Spill
+ mulxq -72(%rsp), %rax, %r15 # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq -80(%rsp), %r13, %r9 # 8-byte Folded Reload
+ mulxq -96(%rsp), %r8, %rsi # 8-byte Folded Reload
+ mulxq -88(%rsp), %r11, %rax # 8-byte Folded Reload
+ addq %r8, %rax
+ adcq %r13, %rsi
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq -104(%rsp), %r15 # 8-byte Folded Reload
+ adcq $0, %r14
+ addq %rbx, %r11
+ adcq %rdi, %rax
+ adcq %rcx, %rsi
+ adcq %r10, %r9
+ adcq %r12, %r15
+ adcq %rbp, %r14
+ sbbq %r12, %r12
+ andl $1, %r12d
+ movq %r11, %rdx
+ imulq -48(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -8(%rsp), %rcx, %r10 # 8-byte Folded Reload
+ movq %rcx, -104(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %rcx, %rdi # 8-byte Folded Reload
+ movq %rcx, -120(%rsp) # 8-byte Spill
+ mulxq -40(%rsp), %r13, %rcx # 8-byte Folded Reload
+ mulxq -24(%rsp), %r8, %rbx # 8-byte Folded Reload
+ mulxq -32(%rsp), %rdx, %rbp # 8-byte Folded Reload
+ addq %r8, %rbp
+ adcq %r13, %rbx
+ adcq -120(%rsp), %rcx # 8-byte Folded Reload
+ adcq -104(%rsp), %rdi # 8-byte Folded Reload
+ adcq $0, %r10
+ addq %r11, %rdx
+ adcq %rax, %rbp
+ adcq %rsi, %rbx
+ adcq %r9, %rcx
+ adcq %r15, %rdi
+ adcq %r14, %r10
+ adcq $0, %r12
+ movq -56(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ mulxq -64(%rsp), %rax, %r15 # 8-byte Folded Reload
+ movq %rax, -104(%rsp) # 8-byte Spill
+ mulxq -72(%rsp), %rax, %r11 # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq -80(%rsp), %r13, %r9 # 8-byte Folded Reload
+ mulxq -96(%rsp), %rsi, %r8 # 8-byte Folded Reload
+ mulxq -88(%rsp), %r14, %rax # 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %r13, %r8
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq -104(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r15
+ addq %rbp, %r14
+ adcq %rbx, %rax
+ adcq %rcx, %r8
+ adcq %rdi, %r9
+ adcq %r10, %r11
+ adcq %r12, %r15
+ sbbq %r13, %r13
+ andl $1, %r13d
+ movq %r14, %rdx
+ imulq -48(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -8(%rsp), %rcx, %r12 # 8-byte Folded Reload
+ movq %rcx, -104(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %rcx, %r10 # 8-byte Folded Reload
+ movq %rcx, -120(%rsp) # 8-byte Spill
+ mulxq -40(%rsp), %rdi, %rsi # 8-byte Folded Reload
+ mulxq -24(%rsp), %rcx, %rbx # 8-byte Folded Reload
+ mulxq -32(%rsp), %rdx, %rbp # 8-byte Folded Reload
+ addq %rcx, %rbp
+ adcq %rdi, %rbx
+ adcq -120(%rsp), %rsi # 8-byte Folded Reload
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r12
+ addq %r14, %rdx
+ adcq %rax, %rbp
+ adcq %r8, %rbx
+ adcq %r9, %rsi
+ adcq %r11, %r10
+ adcq %r15, %r12
+ adcq $0, %r13
+ movq -56(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ mulxq -64(%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rcx, -120(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ mulxq -72(%rsp), %r11, %r14 # 8-byte Folded Reload
+ mulxq -80(%rsp), %r8, %r9 # 8-byte Folded Reload
+ mulxq -96(%rsp), %rax, %rdi # 8-byte Folded Reload
+ mulxq -88(%rsp), %r15, %rcx # 8-byte Folded Reload
+ addq %rax, %rcx
+ adcq %r8, %rdi
+ adcq %r11, %r9
+ adcq -120(%rsp), %r14 # 8-byte Folded Reload
+ movq -104(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rbp, %r15
+ adcq %rbx, %rcx
+ adcq %rsi, %rdi
+ adcq %r10, %r9
+ adcq %r12, %r14
+ adcq %r13, %rax
+ movq %rax, -104(%rsp) # 8-byte Spill
+ sbbq %r12, %r12
+ andl $1, %r12d
+ movq %r15, %rdx
+ imulq -48(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -8(%rsp), %rax, %rbp # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %r13, %r10 # 8-byte Folded Reload
+ mulxq -40(%rsp), %rbx, %r8 # 8-byte Folded Reload
+ mulxq -24(%rsp), %rsi, %r11 # 8-byte Folded Reload
+ mulxq -32(%rsp), %rdx, %rax # 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %rbx, %r11
+ adcq %r13, %r8
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %rbp
+ addq %r15, %rdx
+ adcq %rcx, %rax
+ adcq %rdi, %r11
+ adcq %r9, %r8
+ adcq %r14, %r10
+ adcq -104(%rsp), %rbp # 8-byte Folded Reload
+ adcq $0, %r12
+ movq -56(%rsp), %rcx # 8-byte Reload
+ movq 32(%rcx), %rdx
+ mulxq -64(%rsp), %rcx, %r14 # 8-byte Folded Reload
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ mulxq -72(%rsp), %rcx, %rbx # 8-byte Folded Reload
+ movq %rcx, -64(%rsp) # 8-byte Spill
+ mulxq -80(%rsp), %rsi, %r15 # 8-byte Folded Reload
+ mulxq -96(%rsp), %rcx, %r9 # 8-byte Folded Reload
+ mulxq -88(%rsp), %r13, %rdi # 8-byte Folded Reload
+ addq %rcx, %rdi
+ adcq %rsi, %r9
+ adcq -64(%rsp), %r15 # 8-byte Folded Reload
+ adcq -56(%rsp), %rbx # 8-byte Folded Reload
+ adcq $0, %r14
+ addq %rax, %r13
+ adcq %r11, %rdi
+ adcq %r8, %r9
+ adcq %r10, %r15
+ adcq %rbp, %rbx
+ adcq %r12, %r14
+ sbbq %rax, %rax
+ movq -48(%rsp), %rdx # 8-byte Reload
+ imulq %r13, %rdx
+ mulxq -32(%rsp), %r10, %rcx # 8-byte Folded Reload
+ mulxq -24(%rsp), %r8, %rsi # 8-byte Folded Reload
+ addq %rcx, %r8
+ mulxq -40(%rsp), %rbp, %r11 # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ mulxq -16(%rsp), %rcx, %r12 # 8-byte Folded Reload
+ adcq %r11, %rcx
+ mulxq -8(%rsp), %rsi, %r11 # 8-byte Folded Reload
+ adcq %r12, %rsi
+ adcq $0, %r11
+ andl $1, %eax
+ addq %r13, %r10
+ adcq %rdi, %r8
+ adcq %r9, %rbp
+ adcq %r15, %rcx
+ adcq %rbx, %rsi
+ adcq %r14, %r11
+ adcq $0, %rax
+ movq %r8, %rdi
+ subq -32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rbp, %rbx
+ sbbq -24(%rsp), %rbx # 8-byte Folded Reload
+ movq %rcx, %r9
+ sbbq -40(%rsp), %r9 # 8-byte Folded Reload
+ movq %rsi, %rdx
+ sbbq -16(%rsp), %rdx # 8-byte Folded Reload
+ movq %r11, %r10
+ sbbq -8(%rsp), %r10 # 8-byte Folded Reload
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rsi, %rdx
+ testb %al, %al
+ cmovneq %r8, %rdi
+ movq -112(%rsp), %rax # 8-byte Reload
+ movq %rdi, (%rax)
+ cmovneq %rbp, %rbx
+ movq %rbx, 8(%rax)
+ cmovneq %rcx, %r9
+ movq %r9, 16(%rax)
+ movq %rdx, 24(%rax)
+ cmovneq %r11, %r10
+ movq %r10, 32(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end68:
+ .size mcl_fp_mont5Lbmi2, .Lfunc_end68-mcl_fp_mont5Lbmi2
+
+ .globl mcl_fp_montNF5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF5Lbmi2,@function
+mcl_fp_montNF5Lbmi2: # @mcl_fp_montNF5Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq (%rsi), %r13
+ movq %r13, -64(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rbp
+ movq %rbp, -24(%rsp) # 8-byte Spill
+ movq (%rdx), %rax
+ movq %rbp, %rdx
+ mulxq %rax, %rbp, %r9
+ movq %r13, %rdx
+ mulxq %rax, %r8, %r10
+ movq 16(%rsi), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ addq %rbp, %r10
+ mulxq %rax, %rbp, %rbx
+ adcq %r9, %rbp
+ movq 24(%rsi), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ mulxq %rax, %r15, %r9
+ adcq %rbx, %r15
+ movq 32(%rsi), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ mulxq %rax, %rax, %r11
+ adcq %r9, %rax
+ adcq $0, %r11
+ movq -8(%rcx), %rsi
+ movq %rsi, -72(%rsp) # 8-byte Spill
+ movq %r8, %rdx
+ imulq %rsi, %rdx
+ movq (%rcx), %rsi
+ movq %rsi, -88(%rsp) # 8-byte Spill
+ mulxq %rsi, %rbx, %r14
+ addq %r8, %rbx
+ movq 8(%rcx), %rsi
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ mulxq %rsi, %rbx, %r12
+ adcq %r10, %rbx
+ movq 16(%rcx), %rsi
+ movq %rsi, -96(%rsp) # 8-byte Spill
+ mulxq %rsi, %r10, %rdi
+ adcq %rbp, %r10
+ movq 24(%rcx), %rsi
+ movq %rsi, -56(%rsp) # 8-byte Spill
+ mulxq %rsi, %r9, %rbp
+ adcq %r15, %r9
+ movq 32(%rcx), %rcx
+ movq %rcx, -8(%rsp) # 8-byte Spill
+ mulxq %rcx, %r8, %rcx
+ adcq %rax, %r8
+ adcq $0, %r11
+ addq %r14, %rbx
+ adcq %r12, %r10
+ adcq %rdi, %r9
+ adcq %rbp, %r8
+ adcq %rcx, %r11
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ mulxq -24(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ mulxq %r13, %r14, %rax
+ addq %rcx, %rax
+ mulxq -32(%rsp), %rcx, %rdi # 8-byte Folded Reload
+ adcq %rsi, %rcx
+ mulxq -40(%rsp), %rsi, %r15 # 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -48(%rsp), %rdi, %rbp # 8-byte Folded Reload
+ adcq %r15, %rdi
+ adcq $0, %rbp
+ addq %rbx, %r14
+ adcq %r10, %rax
+ adcq %r9, %rcx
+ adcq %r8, %rsi
+ adcq %r11, %rdi
+ adcq $0, %rbp
+ movq %r14, %rdx
+ movq -72(%rsp), %r12 # 8-byte Reload
+ imulq %r12, %rdx
+ mulxq -88(%rsp), %rbx, %r15 # 8-byte Folded Reload
+ addq %r14, %rbx
+ movq -80(%rsp), %r13 # 8-byte Reload
+ mulxq %r13, %r8, %rbx
+ adcq %rax, %r8
+ mulxq -96(%rsp), %r9, %rax # 8-byte Folded Reload
+ adcq %rcx, %r9
+ mulxq -56(%rsp), %r10, %rcx # 8-byte Folded Reload
+ adcq %rsi, %r10
+ mulxq -8(%rsp), %r11, %rdx # 8-byte Folded Reload
+ adcq %rdi, %r11
+ adcq $0, %rbp
+ addq %r15, %r8
+ adcq %rbx, %r9
+ adcq %rax, %r10
+ adcq %rcx, %r11
+ adcq %rdx, %rbp
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ mulxq -24(%rsp), %rcx, %rax # 8-byte Folded Reload
+ mulxq -64(%rsp), %r14, %rsi # 8-byte Folded Reload
+ addq %rcx, %rsi
+ mulxq -32(%rsp), %rbx, %rcx # 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -40(%rsp), %rdi, %r15 # 8-byte Folded Reload
+ adcq %rcx, %rdi
+ mulxq -48(%rsp), %rcx, %rax # 8-byte Folded Reload
+ adcq %r15, %rcx
+ adcq $0, %rax
+ addq %r8, %r14
+ adcq %r9, %rsi
+ adcq %r10, %rbx
+ adcq %r11, %rdi
+ adcq %rbp, %rcx
+ adcq $0, %rax
+ movq %r14, %rdx
+ imulq %r12, %rdx
+ movq -88(%rsp), %r12 # 8-byte Reload
+ mulxq %r12, %rbp, %r15
+ addq %r14, %rbp
+ mulxq %r13, %r8, %rbp
+ adcq %rsi, %r8
+ movq -96(%rsp), %r13 # 8-byte Reload
+ mulxq %r13, %r9, %rsi
+ adcq %rbx, %r9
+ mulxq -56(%rsp), %r10, %rbx # 8-byte Folded Reload
+ adcq %rdi, %r10
+ mulxq -8(%rsp), %r11, %rdx # 8-byte Folded Reload
+ adcq %rcx, %r11
+ adcq $0, %rax
+ addq %r15, %r8
+ adcq %rbp, %r9
+ adcq %rsi, %r10
+ adcq %rbx, %r11
+ adcq %rdx, %rax
+ movq -16(%rsp), %rcx # 8-byte Reload
+ movq 24(%rcx), %rdx
+ mulxq -24(%rsp), %rdi, %rsi # 8-byte Folded Reload
+ mulxq -64(%rsp), %r14, %rcx # 8-byte Folded Reload
+ addq %rdi, %rcx
+ mulxq -32(%rsp), %rbx, %rdi # 8-byte Folded Reload
+ adcq %rsi, %rbx
+ mulxq -40(%rsp), %rsi, %r15 # 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -48(%rsp), %rdi, %rbp # 8-byte Folded Reload
+ adcq %r15, %rdi
+ adcq $0, %rbp
+ addq %r8, %r14
+ adcq %r9, %rcx
+ adcq %r10, %rbx
+ adcq %r11, %rsi
+ adcq %rax, %rdi
+ adcq $0, %rbp
+ movq %r14, %rdx
+ imulq -72(%rsp), %rdx # 8-byte Folded Reload
+ mulxq %r12, %rax, %r11
+ addq %r14, %rax
+ mulxq -80(%rsp), %r8, %r14 # 8-byte Folded Reload
+ adcq %rcx, %r8
+ mulxq %r13, %r9, %rax
+ adcq %rbx, %r9
+ movq -56(%rsp), %r12 # 8-byte Reload
+ mulxq %r12, %r10, %rbx
+ adcq %rsi, %r10
+ mulxq -8(%rsp), %rcx, %rdx # 8-byte Folded Reload
+ adcq %rdi, %rcx
+ adcq $0, %rbp
+ addq %r11, %r8
+ adcq %r14, %r9
+ adcq %rax, %r10
+ adcq %rbx, %rcx
+ adcq %rdx, %rbp
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ mulxq -24(%rsp), %rdi, %rbx # 8-byte Folded Reload
+ mulxq -64(%rsp), %r14, %rsi # 8-byte Folded Reload
+ addq %rdi, %rsi
+ mulxq -32(%rsp), %rdi, %rax # 8-byte Folded Reload
+ adcq %rbx, %rdi
+ mulxq -40(%rsp), %rbx, %r15 # 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -48(%rsp), %r11, %rax # 8-byte Folded Reload
+ adcq %r15, %r11
+ adcq $0, %rax
+ addq %r8, %r14
+ adcq %r9, %rsi
+ adcq %r10, %rdi
+ adcq %rcx, %rbx
+ adcq %rbp, %r11
+ adcq $0, %rax
+ movq -72(%rsp), %rdx # 8-byte Reload
+ imulq %r14, %rdx
+ movq -88(%rsp), %r10 # 8-byte Reload
+ mulxq %r10, %rcx, %rbp
+ movq %rbp, -16(%rsp) # 8-byte Spill
+ addq %r14, %rcx
+ movq -80(%rsp), %r9 # 8-byte Reload
+ mulxq %r9, %r14, %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ adcq %rsi, %r14
+ movq %r13, %r8
+ mulxq %r8, %r15, %r13
+ adcq %rdi, %r15
+ mulxq %r12, %rbp, %rcx
+ adcq %rbx, %rbp
+ movq -8(%rsp), %rbx # 8-byte Reload
+ mulxq %rbx, %r12, %rdx
+ adcq %r11, %r12
+ adcq $0, %rax
+ addq -16(%rsp), %r14 # 8-byte Folded Reload
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ adcq %r13, %rbp
+ adcq %rcx, %r12
+ adcq %rdx, %rax
+ movq %r14, %rcx
+ subq %r10, %rcx
+ movq %r15, %rsi
+ sbbq %r9, %rsi
+ movq %rbp, %rdi
+ sbbq %r8, %rdi
+ movq %r12, %r8
+ sbbq -56(%rsp), %r8 # 8-byte Folded Reload
+ movq %rax, %rdx
+ sbbq %rbx, %rdx
+ movq %rdx, %rbx
+ sarq $63, %rbx
+ cmovsq %r14, %rcx
+ movq -104(%rsp), %rbx # 8-byte Reload
+ movq %rcx, (%rbx)
+ cmovsq %r15, %rsi
+ movq %rsi, 8(%rbx)
+ cmovsq %rbp, %rdi
+ movq %rdi, 16(%rbx)
+ cmovsq %r12, %r8
+ movq %r8, 24(%rbx)
+ cmovsq %rax, %rdx
+ movq %rdx, 32(%rbx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end69:
+ .size mcl_fp_montNF5Lbmi2, .Lfunc_end69-mcl_fp_montNF5Lbmi2
+
+ .globl mcl_fp_montRed5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed5Lbmi2,@function
+mcl_fp_montRed5Lbmi2: # @mcl_fp_montRed5Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq (%rcx), %rbx
+ movq %rbx, -8(%rsp) # 8-byte Spill
+ movq (%rsi), %r9
+ movq %r9, %rdx
+ imulq %rax, %rdx
+ movq %rax, %r15
+ movq 32(%rcx), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ mulxq %rax, %r8, %r13
+ movq 24(%rcx), %r12
+ movq %r12, -32(%rsp) # 8-byte Spill
+ mulxq %r12, %r10, %r14
+ movq 16(%rcx), %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ mulxq %rax, %rdi, %rbp
+ mulxq %rcx, %rax, %r11
+ mulxq %rbx, %rdx, %rcx
+ addq %rax, %rcx
+ adcq %rdi, %r11
+ adcq %r10, %rbp
+ adcq %r8, %r14
+ adcq $0, %r13
+ addq %r9, %rdx
+ movq 72(%rsi), %rax
+ movq 64(%rsi), %rdx
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r11
+ adcq 24(%rsi), %rbp
+ adcq 32(%rsi), %r14
+ adcq 40(%rsi), %r13
+ movq 56(%rsi), %rdi
+ movq 48(%rsi), %rsi
+ adcq $0, %rsi
+ movq %rsi, -88(%rsp) # 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -72(%rsp) # 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rcx, %rdx
+ imulq %r15, %rdx
+ mulxq -40(%rsp), %rax, %r15 # 8-byte Folded Reload
+ movq %rax, -96(%rsp) # 8-byte Spill
+ mulxq %r12, %rax, %r10
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq -48(%rsp), %r12 # 8-byte Reload
+ mulxq %r12, %rbx, %r8
+ mulxq -16(%rsp), %r9, %rdi # 8-byte Folded Reload
+ mulxq -8(%rsp), %rdx, %rax # 8-byte Folded Reload
+ addq %r9, %rax
+ adcq %rbx, %rdi
+ adcq -104(%rsp), %r8 # 8-byte Folded Reload
+ adcq -96(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r15
+ addq %rcx, %rdx
+ adcq %r11, %rax
+ adcq %rbp, %rdi
+ adcq %r14, %r8
+ adcq %r13, %r10
+ adcq -88(%rsp), %r15 # 8-byte Folded Reload
+ adcq $0, -72(%rsp) # 8-byte Folded Spill
+ adcq $0, -56(%rsp) # 8-byte Folded Spill
+ adcq $0, -24(%rsp) # 8-byte Folded Spill
+ adcq $0, %rsi
+ movq %rax, %rdx
+ imulq -64(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -40(%rsp), %rcx, %r13 # 8-byte Folded Reload
+ movq %rcx, -88(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rcx, %r14 # 8-byte Folded Reload
+ movq %rcx, -96(%rsp) # 8-byte Spill
+ mulxq %r12, %r11, %rbx
+ mulxq -16(%rsp), %r9, %rbp # 8-byte Folded Reload
+ mulxq -8(%rsp), %rdx, %rcx # 8-byte Folded Reload
+ addq %r9, %rcx
+ adcq %r11, %rbp
+ adcq -96(%rsp), %rbx # 8-byte Folded Reload
+ adcq -88(%rsp), %r14 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rax, %rdx
+ adcq %rdi, %rcx
+ adcq %r8, %rbp
+ adcq %r10, %rbx
+ adcq %r15, %r14
+ adcq -72(%rsp), %r13 # 8-byte Folded Reload
+ adcq $0, -56(%rsp) # 8-byte Folded Spill
+ adcq $0, -24(%rsp) # 8-byte Folded Spill
+ adcq $0, %rsi
+ movq %rcx, %rdx
+ imulq -64(%rsp), %rdx # 8-byte Folded Reload
+ movq -40(%rsp), %r9 # 8-byte Reload
+ mulxq %r9, %rax, %r12
+ movq %rax, -72(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rax, %r10 # 8-byte Folded Reload
+ movq %rax, -88(%rsp) # 8-byte Spill
+ mulxq -48(%rsp), %r8, %r11 # 8-byte Folded Reload
+ mulxq -16(%rsp), %rdi, %r15 # 8-byte Folded Reload
+ mulxq -8(%rsp), %rdx, %rax # 8-byte Folded Reload
+ addq %rdi, %rax
+ adcq %r8, %r15
+ adcq -88(%rsp), %r11 # 8-byte Folded Reload
+ adcq -72(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r12
+ addq %rcx, %rdx
+ adcq %rbp, %rax
+ adcq %rbx, %r15
+ adcq %r14, %r11
+ adcq %r13, %r10
+ adcq -56(%rsp), %r12 # 8-byte Folded Reload
+ adcq $0, -24(%rsp) # 8-byte Folded Spill
+ adcq $0, %rsi
+ movq -64(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ mulxq %r9, %rdi, %rcx
+ movq %rdi, -56(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rbp, %rdi # 8-byte Folded Reload
+ movq %rbp, -64(%rsp) # 8-byte Spill
+ mulxq -48(%rsp), %r13, %rbp # 8-byte Folded Reload
+ mulxq -8(%rsp), %r8, %r9 # 8-byte Folded Reload
+ movq -16(%rsp), %r14 # 8-byte Reload
+ mulxq %r14, %rbx, %rdx
+ addq %r9, %rbx
+ adcq %r13, %rdx
+ adcq -64(%rsp), %rbp # 8-byte Folded Reload
+ adcq -56(%rsp), %rdi # 8-byte Folded Reload
+ adcq $0, %rcx
+ addq %rax, %r8
+ adcq %r15, %rbx
+ adcq %r11, %rdx
+ adcq %r10, %rbp
+ adcq %r12, %rdi
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ adcq $0, %rsi
+ movq %rbx, %rax
+ subq -8(%rsp), %rax # 8-byte Folded Reload
+ movq %rdx, %r8
+ sbbq %r14, %r8
+ movq %rbp, %r9
+ sbbq -48(%rsp), %r9 # 8-byte Folded Reload
+ movq %rdi, %r10
+ sbbq -32(%rsp), %r10 # 8-byte Folded Reload
+ movq %rcx, %r11
+ sbbq -40(%rsp), %r11 # 8-byte Folded Reload
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rcx, %r11
+ testb %sil, %sil
+ cmovneq %rbx, %rax
+ movq -80(%rsp), %rcx # 8-byte Reload
+ movq %rax, (%rcx)
+ cmovneq %rdx, %r8
+ movq %r8, 8(%rcx)
+ cmovneq %rbp, %r9
+ movq %r9, 16(%rcx)
+ cmovneq %rdi, %r10
+ movq %r10, 24(%rcx)
+ movq %r11, 32(%rcx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end70:
+ .size mcl_fp_montRed5Lbmi2, .Lfunc_end70-mcl_fp_montRed5Lbmi2
+
+ .globl mcl_fp_addPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre5Lbmi2,@function
+mcl_fp_addPre5Lbmi2: # @mcl_fp_addPre5Lbmi2
+# BB#0:
+ movq 32(%rdx), %r8
+ movq 24(%rdx), %r9
+ movq 24(%rsi), %r11
+ movq 32(%rsi), %r10
+ movq 16(%rdx), %rcx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rcx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ adcq %r9, %r11
+ movq %r11, 24(%rdi)
+ adcq %r8, %r10
+ movq %r10, 32(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end71:
+ .size mcl_fp_addPre5Lbmi2, .Lfunc_end71-mcl_fp_addPre5Lbmi2
+
+ .globl mcl_fp_subPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre5Lbmi2,@function
+mcl_fp_subPre5Lbmi2: # @mcl_fp_subPre5Lbmi2
+# BB#0:
+ pushq %rbx
+ movq 32(%rsi), %r10
+ movq 24(%rdx), %r8
+ movq 32(%rdx), %r9
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %rcx
+ movq %rbx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r8, %r11
+ movq %r11, 24(%rdi)
+ sbbq %r9, %r10
+ movq %r10, 32(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ retq
+.Lfunc_end72:
+ .size mcl_fp_subPre5Lbmi2, .Lfunc_end72-mcl_fp_subPre5Lbmi2
+
+ .globl mcl_fp_shr1_5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_5Lbmi2,@function
+mcl_fp_shr1_5Lbmi2: # @mcl_fp_shr1_5Lbmi2
+# BB#0:
+ movq 32(%rsi), %r8
+ movq 24(%rsi), %rcx
+ movq 16(%rsi), %rdx
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rax
+ movq %rax, (%rdi)
+ shrdq $1, %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 16(%rdi)
+ shrdq $1, %r8, %rcx
+ movq %rcx, 24(%rdi)
+ shrq %r8
+ movq %r8, 32(%rdi)
+ retq
+.Lfunc_end73:
+ .size mcl_fp_shr1_5Lbmi2, .Lfunc_end73-mcl_fp_shr1_5Lbmi2
+
+ .globl mcl_fp_add5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add5Lbmi2,@function
+mcl_fp_add5Lbmi2: # @mcl_fp_add5Lbmi2
+# BB#0:
+ pushq %rbx
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %rbx
+ movq 24(%rsi), %r9
+ movq 32(%rsi), %r8
+ movq 16(%rdx), %r10
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r10
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ adcq %rbx, %r9
+ movq %r9, 24(%rdi)
+ adcq %r11, %r8
+ movq %r8, 32(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r10
+ sbbq 24(%rcx), %r9
+ sbbq 32(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB74_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %r9, 24(%rdi)
+ movq %r8, 32(%rdi)
+.LBB74_2: # %carry
+ popq %rbx
+ retq
+.Lfunc_end74:
+ .size mcl_fp_add5Lbmi2, .Lfunc_end74-mcl_fp_add5Lbmi2
+
+ .globl mcl_fp_addNF5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF5Lbmi2,@function
+mcl_fp_addNF5Lbmi2: # @mcl_fp_addNF5Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 32(%rdx), %r8
+ movq 24(%rdx), %r9
+ movq 16(%rdx), %r10
+ movq (%rdx), %r14
+ movq 8(%rdx), %r11
+ addq (%rsi), %r14
+ adcq 8(%rsi), %r11
+ adcq 16(%rsi), %r10
+ adcq 24(%rsi), %r9
+ adcq 32(%rsi), %r8
+ movq %r14, %rsi
+ subq (%rcx), %rsi
+ movq %r11, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r10, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r9, %r15
+ sbbq 24(%rcx), %r15
+ movq %r8, %rax
+ sbbq 32(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r14, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ cmovsq %r9, %r15
+ movq %r15, 24(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 32(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end75:
+ .size mcl_fp_addNF5Lbmi2, .Lfunc_end75-mcl_fp_addNF5Lbmi2
+
+ .globl mcl_fp_sub5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub5Lbmi2,@function
+mcl_fp_sub5Lbmi2: # @mcl_fp_sub5Lbmi2
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 32(%rsi), %r8
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r10, 16(%rdi)
+ sbbq %r11, %r9
+ movq %r9, 24(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 32(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB76_2
+# BB#1: # %carry
+ movq 32(%rcx), %r11
+ movq 24(%rcx), %r14
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rbx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %rsi, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r9, %r14
+ movq %r14, 24(%rdi)
+ adcq %r8, %r11
+ movq %r11, 32(%rdi)
+.LBB76_2: # %nocarry
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end76:
+ .size mcl_fp_sub5Lbmi2, .Lfunc_end76-mcl_fp_sub5Lbmi2
+
+ .globl mcl_fp_subNF5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF5Lbmi2,@function
+mcl_fp_subNF5Lbmi2: # @mcl_fp_subNF5Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 32(%rsi), %r11
+ movq 24(%rsi), %r8
+ movq 16(%rsi), %r9
+ movq (%rsi), %r10
+ movq 8(%rsi), %r14
+ subq (%rdx), %r10
+ sbbq 8(%rdx), %r14
+ sbbq 16(%rdx), %r9
+ sbbq 24(%rdx), %r8
+ sbbq 32(%rdx), %r11
+ movq %r11, %rax
+ sarq $63, %rax
+ movq %rax, %rdx
+ shldq $1, %r11, %rdx
+ movq 8(%rcx), %rbx
+ andq %rdx, %rbx
+ andq (%rcx), %rdx
+ movq 32(%rcx), %r15
+ andq %rax, %r15
+ rorxq $63, %rax, %rsi
+ andq 24(%rcx), %rax
+ andq 16(%rcx), %rsi
+ addq %r10, %rdx
+ movq %rdx, (%rdi)
+ adcq %r14, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r9, %rsi
+ movq %rsi, 16(%rdi)
+ adcq %r8, %rax
+ movq %rax, 24(%rdi)
+ adcq %r11, %r15
+ movq %r15, 32(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end77:
+ .size mcl_fp_subNF5Lbmi2, .Lfunc_end77-mcl_fp_subNF5Lbmi2
+
+ .globl mcl_fpDbl_add5Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add5Lbmi2,@function
+mcl_fpDbl_add5Lbmi2: # @mcl_fpDbl_add5Lbmi2
+# BB#0:
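+# Double-width (10-limb) addition: the low 5 limbs of the sum are stored
+# as-is; the high 5 limbs are conditionally reduced by the modulus (%rcx).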
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 72(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 64(%rdx), %r11
+ movq 56(%rdx), %r14
+ movq 48(%rdx), %r15
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %r13
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %rbp
+ adcq 32(%rdx), %r13
+ movq 40(%rdx), %r9
+ movq %rbx, (%rdi)
+ movq 72(%rsi), %r8
+ movq %rax, 8(%rdi)
+ movq 64(%rsi), %r10
+ movq %r12, 16(%rdi)
+ movq 56(%rsi), %r12
+ movq %rbp, 24(%rdi)
+ movq 48(%rsi), %rbp
+ movq 40(%rsi), %rbx
+ movq %r13, 32(%rdi)
+ adcq %r9, %rbx
+ adcq %r15, %rbp
+ adcq %r14, %r12
+ adcq %r11, %r10
+ adcq -8(%rsp), %r8 # 8-byte Folded Reload
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rbx, %rax
+ subq (%rcx), %rax
+ movq %rbp, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r12, %r9
+ sbbq 16(%rcx), %r9
+ movq %r10, %r11
+ sbbq 24(%rcx), %r11
+ movq %r8, %r14
+ sbbq 32(%rcx), %r14
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rbx, %rax
+ movq %rax, 40(%rdi)
+ testb %sil, %sil
+ cmovneq %rbp, %rdx
+ movq %rdx, 48(%rdi)
+ cmovneq %r12, %r9
+ movq %r9, 56(%rdi)
+ cmovneq %r10, %r11
+ movq %r11, 64(%rdi)
+ cmovneq %r8, %r14
+ movq %r14, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end78:
+ .size mcl_fpDbl_add5Lbmi2, .Lfunc_end78-mcl_fpDbl_add5Lbmi2
+
+ .globl mcl_fpDbl_sub5Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub5Lbmi2,@function
+mcl_fpDbl_sub5Lbmi2: # @mcl_fpDbl_sub5Lbmi2
+# BB#0:
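+# Double-width (10-limb) subtraction: the low half is stored directly; the
+# modulus (or zero, when no borrow propagated out) is then added into the
+# high half.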
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 72(%rdx), %r9
+ movq 64(%rdx), %r10
+ movq 56(%rdx), %r14
+ movq 16(%rsi), %r8
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %eax, %eax
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r8
+ movq 24(%rsi), %r12
+ sbbq 24(%rdx), %r12
+ movq %r15, (%rdi)
+ movq 32(%rsi), %rbx
+ sbbq 32(%rdx), %rbx
+ movq %r11, 8(%rdi)
+ movq 48(%rdx), %r15
+ movq 40(%rdx), %rdx
+ movq %r8, 16(%rdi)
+ movq 72(%rsi), %r8
+ movq %r12, 24(%rdi)
+ movq 64(%rsi), %r11
+ movq %rbx, 32(%rdi)
+ movq 40(%rsi), %rbp
+ sbbq %rdx, %rbp
+ movq 56(%rsi), %r12
+ movq 48(%rsi), %r13
+ sbbq %r15, %r13
+ sbbq %r14, %r12
+ sbbq %r10, %r11
+ sbbq %r9, %r8
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ movq 16(%rcx), %rdx
+ cmoveq %rax, %rdx
+ movq 8(%rcx), %rbx
+ cmoveq %rax, %rbx
+ movq 32(%rcx), %r9
+ cmoveq %rax, %r9
+ cmovneq 24(%rcx), %rax
+ addq %rbp, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r13, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r12, %rdx
+ movq %rdx, 56(%rdi)
+ adcq %r11, %rax
+ movq %rax, 64(%rdi)
+ adcq %r8, %r9
+ movq %r9, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end79:
+ .size mcl_fpDbl_sub5Lbmi2, .Lfunc_end79-mcl_fpDbl_sub5Lbmi2
+
+ .globl mcl_fp_mulUnitPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre6Lbmi2,@function
+mcl_fp_mulUnitPre6Lbmi2: # @mcl_fp_mulUnitPre6Lbmi2
+# BB#0:
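+# 6-limb by 1-word multiplication: the scalar arrives in %rdx (the implicit
+# mulx operand); partial products are chained with adc into a 7-limb result.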
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ mulxq 40(%rsi), %r8, %r11
+ mulxq 32(%rsi), %r9, %r12
+ mulxq 24(%rsi), %r10, %rcx
+ mulxq 16(%rsi), %r14, %rbx
+ mulxq 8(%rsi), %r15, %rax
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %r15, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r14, %rax
+ movq %rax, 16(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r9, %rcx
+ movq %rcx, 32(%rdi)
+ adcq %r8, %r12
+ movq %r12, 40(%rdi)
+ adcq $0, %r11
+ movq %r11, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end80:
+ .size mcl_fp_mulUnitPre6Lbmi2, .Lfunc_end80-mcl_fp_mulUnitPre6Lbmi2
+
+ .globl mcl_fpDbl_mulPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre6Lbmi2,@function
+mcl_fpDbl_mulPre6Lbmi2: # @mcl_fpDbl_mulPre6Lbmi2
+# BB#0:
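+# Full 6x6-limb schoolbook multiplication: one mulx row per multiplier word,
+# accumulated with adc; 12-limb product, no modular reduction.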
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r11
+ movq %r11, -16(%rsp) # 8-byte Spill
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq (%rsi), %r15
+ movq 8(%rsi), %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ movq (%r11), %rax
+ movq %rcx, %rdx
+ mulxq %rax, %rcx, %r14
+ movq %r15, %rdx
+ mulxq %rax, %rdx, %rbp
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rbx
+ movq %rbx, -32(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ addq %rcx, %rbp
+ mulxq %rax, %rcx, %r12
+ adcq %r14, %rcx
+ movq %rbx, %rdx
+ mulxq %rax, %rbx, %r14
+ adcq %r12, %rbx
+ movq 32(%rsi), %r12
+ movq %r12, %rdx
+ mulxq %rax, %r8, %r13
+ adcq %r14, %r8
+ movq 40(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rax, %r9, %r10
+ adcq %r13, %r9
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq %rax, (%rdi)
+ adcq $0, %r10
+ movq 8(%r11), %rdi
+ movq %r15, %rdx
+ mulxq %rdi, %r13, %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ addq %rbp, %r13
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rbp, %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ adcq %rcx, %rbp
+ movq -40(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rax, %r11
+ adcq %rbx, %rax
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rbx, %rcx
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ adcq %r8, %rbx
+ movq %r12, %rdx
+ mulxq %rdi, %rcx, %r8
+ adcq %r9, %rcx
+ movq %r14, %rdx
+ mulxq %rdi, %r12, %rdx
+ adcq %r10, %r12
+ sbbq %r15, %r15
+ andl $1, %r15d
+ addq -48(%rsp), %rbp # 8-byte Folded Reload
+ adcq -24(%rsp), %rax # 8-byte Folded Reload
+ adcq %r11, %rbx
+ movq -8(%rsp), %rdi # 8-byte Reload
+ movq %r13, 8(%rdi)
+ adcq -32(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r8, %r12
+ adcq %rdx, %r15
+ movq (%rsi), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %r8
+ movq %r8, -24(%rsp) # 8-byte Spill
+ movq -16(%rsp), %r14 # 8-byte Reload
+ movq 16(%r14), %rdi
+ mulxq %rdi, %r13, %rdx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ addq %rbp, %r13
+ movq %r8, %rdx
+ mulxq %rdi, %r8, %rdx
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ adcq %rax, %r8
+ movq 16(%rsi), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ mulxq %rdi, %r11, %rax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ adcq %rbx, %r11
+ movq 24(%rsi), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ mulxq %rdi, %rax, %rbx
+ adcq %rcx, %rax
+ movq 32(%rsi), %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ mulxq %rdi, %r10, %rcx
+ adcq %r12, %r10
+ movq 40(%rsi), %rdx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ mulxq %rdi, %r9, %rdx
+ adcq %r15, %r9
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq -72(%rsp), %r8 # 8-byte Folded Reload
+ adcq -80(%rsp), %r11 # 8-byte Folded Reload
+ adcq -88(%rsp), %rax # 8-byte Folded Reload
+ adcq %rbx, %r10
+ adcq %rcx, %r9
+ adcq %rdx, %rbp
+ movq -8(%rsp), %rcx # 8-byte Reload
+ movq %r13, 16(%rcx)
+ movq 24(%r14), %rdi
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r12, %rcx
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ addq %r8, %r12
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rbx, %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ adcq %r11, %rbx
+ movq -40(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rcx, %r11
+ adcq %rax, %rcx
+ movq -48(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r14, %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ adcq %r10, %r14
+ movq -56(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r8, %rax
+ adcq %r9, %r8
+ movq -64(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r13, %rdx
+ adcq %rbp, %r13
+ sbbq %r15, %r15
+ andl $1, %r15d
+ addq -32(%rsp), %rbx # 8-byte Folded Reload
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r11, %r14
+ movq -8(%rsp), %rdi # 8-byte Reload
+ movq %r12, 24(%rdi)
+ adcq -40(%rsp), %r8 # 8-byte Folded Reload
+ adcq %rax, %r13
+ adcq %rdx, %r15
+ movq (%rsi), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rbp
+ movq %rbp, -24(%rsp) # 8-byte Spill
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdi
+ mulxq %rdi, %r12, %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ addq %rbx, %r12
+ movq %rbp, %rdx
+ mulxq %rdi, %rbx, %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ adcq %rcx, %rbx
+ movq 16(%rsi), %r11
+ movq %r11, %rdx
+ mulxq %rdi, %rax, %rcx
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ adcq %r14, %rax
+ movq 24(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rdi, %rbp, %rcx
+ movq %rcx, -64(%rsp) # 8-byte Spill
+ adcq %r8, %rbp
+ movq 32(%rsi), %r8
+ movq %r8, %rdx
+ mulxq %rdi, %rcx, %r10
+ adcq %r13, %rcx
+ movq 40(%rsi), %r13
+ movq %r13, %rdx
+ mulxq %rdi, %r9, %rdx
+ adcq %r15, %r9
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq -40(%rsp), %rbx # 8-byte Folded Reload
+ adcq -48(%rsp), %rax # 8-byte Folded Reload
+ adcq -56(%rsp), %rbp # 8-byte Folded Reload
+ adcq -64(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r10, %r9
+ adcq %rdx, %rsi
+ movq -8(%rsp), %r10 # 8-byte Reload
+ movq %r12, 32(%r10)
+ movq -16(%rsp), %rdx # 8-byte Reload
+ movq 40(%rdx), %rdi
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r15, %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ addq %rbx, %r15
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rbx, %r12
+ adcq %rax, %rbx
+ movq %r11, %rdx
+ mulxq %rdi, %rax, %r11
+ adcq %rbp, %rax
+ movq %r14, %rdx
+ mulxq %rdi, %rbp, %r14
+ adcq %rcx, %rbp
+ movq %r8, %rdx
+ mulxq %rdi, %rcx, %r8
+ adcq %r9, %rcx
+ movq %r13, %rdx
+ mulxq %rdi, %rdi, %r9
+ adcq %rsi, %rdi
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq -16(%rsp), %rbx # 8-byte Folded Reload
+ movq %r15, 40(%r10)
+ movq %rbx, 48(%r10)
+ adcq %r12, %rax
+ movq %rax, 56(%r10)
+ adcq %r11, %rbp
+ movq %rbp, 64(%r10)
+ adcq %r14, %rcx
+ movq %rcx, 72(%r10)
+ adcq %r8, %rdi
+ movq %rdi, 80(%r10)
+ adcq %r9, %rsi
+ movq %rsi, 88(%r10)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end81:
+ .size mcl_fpDbl_mulPre6Lbmi2, .Lfunc_end81-mcl_fpDbl_mulPre6Lbmi2
+
+ .globl mcl_fpDbl_sqrPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre6Lbmi2,@function
+mcl_fpDbl_sqrPre6Lbmi2: # @mcl_fpDbl_sqrPre6Lbmi2
+# BB#0:
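+# Full 6-limb squaring: the same row-by-row accumulation as mulPre6,
+# producing the 12-limb square without reduction.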
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, %r9
+ movq %r9, -8(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rax
+ mulxq %rcx, %r10, %r8
+ movq 24(%rsi), %rbp
+ movq %rbp, -32(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ mulxq %rcx, %r11, %rbx
+ movq %rbx, -16(%rsp) # 8-byte Spill
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %r14
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ addq %r11, %r14
+ adcq %rbx, %r10
+ movq %rbp, %rdx
+ mulxq %rcx, %r15, %rbp
+ adcq %r8, %r15
+ movq 32(%rsi), %rbx
+ movq %rbx, %rdx
+ mulxq %rcx, %r8, %r13
+ adcq %rbp, %r8
+ movq 40(%rsi), %rdi
+ movq %rdi, %rdx
+ mulxq %rcx, %rcx, %r12
+ adcq %r13, %rcx
+ movq -40(%rsp), %rdx # 8-byte Reload
+ movq %rdx, (%r9)
+ adcq $0, %r12
+ addq %r11, %r14
+ movq %rax, %rdx
+ mulxq %rax, %rbp, %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ adcq %r10, %rbp
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r13, %r10
+ adcq %r15, %r13
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r15, %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq %r8, %r15
+ movq %rbx, %rdx
+ mulxq %rax, %rbx, %r8
+ adcq %rcx, %rbx
+ movq %rdi, %rdx
+ mulxq %rax, %r11, %rax
+ adcq %r12, %r11
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -16(%rsp), %rbp # 8-byte Folded Reload
+ adcq -40(%rsp), %r13 # 8-byte Folded Reload
+ movq %r14, 8(%r9)
+ adcq %r10, %r15
+ adcq -24(%rsp), %rbx # 8-byte Folded Reload
+ adcq %r8, %r11
+ adcq %rax, %r12
+ movq (%rsi), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rcx
+ mulxq %rcx, %rax, %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ addq %rbp, %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rdi, %rdx
+ mulxq %rcx, %rbp, %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ adcq %r13, %rbp
+ movq %rcx, %rdx
+ mulxq %rcx, %r13, %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ adcq %r15, %r13
+ movq 24(%rsi), %rax
+ movq %rax, %rdx
+ mulxq %rcx, %r8, %rdi
+ movq %rdi, -40(%rsp) # 8-byte Spill
+ adcq %r8, %rbx
+ movq 32(%rsi), %r10
+ movq %r10, %rdx
+ mulxq %rcx, %r14, %r15
+ adcq %r11, %r14
+ movq 40(%rsi), %r11
+ movq %r11, %rdx
+ mulxq %rcx, %r9, %rdx
+ adcq %r12, %r9
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq -48(%rsp), %rbp # 8-byte Folded Reload
+ adcq -56(%rsp), %r13 # 8-byte Folded Reload
+ adcq -64(%rsp), %rbx # 8-byte Folded Reload
+ adcq %rdi, %r14
+ adcq %r15, %r9
+ adcq %rdx, %rcx
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %rdi, %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ addq %rbp, %rdi
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r15, %rbp
+ adcq %r13, %r15
+ adcq %r8, %rbx
+ movq %rax, %rdx
+ mulxq %rax, %r8, %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq %r14, %r8
+ movq %r10, %rdx
+ mulxq %rax, %r12, %r10
+ adcq %r9, %r12
+ movq %r11, %rdx
+ mulxq %rax, %r13, %rax
+ adcq %rcx, %r13
+ sbbq %r9, %r9
+ andl $1, %r9d
+ addq -32(%rsp), %r15 # 8-byte Folded Reload
+ adcq %rbp, %rbx
+ movq -8(%rsp), %rdx # 8-byte Reload
+ movq -16(%rsp), %rbp # 8-byte Reload
+ movq %rbp, 16(%rdx)
+ movq %rdi, 24(%rdx)
+ adcq -40(%rsp), %r8 # 8-byte Folded Reload
+ adcq -24(%rsp), %r12 # 8-byte Folded Reload
+ adcq %r10, %r13
+ adcq %rax, %r9
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rcx, %rdx
+ mulxq %rax, %rdx, %rbp
+ movq %rbp, -40(%rsp) # 8-byte Spill
+ addq %r15, %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rdi, %rdx
+ mulxq %rax, %r15, %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ adcq %rbx, %r15
+ movq 16(%rsi), %r10
+ movq %r10, %rdx
+ mulxq %rax, %r14, %rbx
+ adcq %r8, %r14
+ movq 24(%rsi), %r8
+ movq %r8, %rdx
+ mulxq %rax, %rbp, %rdi
+ adcq %r12, %rbp
+ movq %rax, %rdx
+ mulxq %rax, %r11, %r12
+ adcq %r13, %r11
+ movq 40(%rsi), %rsi
+ movq %rsi, %rdx
+ mulxq %rax, %r13, %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ adcq %r13, %r9
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq -40(%rsp), %r15 # 8-byte Folded Reload
+ adcq -48(%rsp), %r14 # 8-byte Folded Reload
+ adcq %rbx, %rbp
+ adcq %rdi, %r11
+ adcq %r12, %r9
+ adcq %rdx, %rax
+ movq %rcx, %rdx
+ mulxq %rsi, %r12, %rcx
+ addq %r15, %r12
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rsi, %rdi, %r15
+ adcq %r14, %rdi
+ movq %r10, %rdx
+ mulxq %rsi, %rbx, %r10
+ adcq %rbp, %rbx
+ movq %r8, %rdx
+ mulxq %rsi, %rbp, %r8
+ adcq %r11, %rbp
+ adcq %r13, %r9
+ movq %rsi, %rdx
+ mulxq %rsi, %rsi, %r11
+ adcq %rax, %rsi
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rcx, %rdi
+ movq -8(%rsp), %rdx # 8-byte Reload
+ movq -16(%rsp), %rcx # 8-byte Reload
+ movq %rcx, 32(%rdx)
+ movq %r12, 40(%rdx)
+ movq %rdi, 48(%rdx)
+ adcq %r15, %rbx
+ movq %rbx, 56(%rdx)
+ adcq %r10, %rbp
+ movq %rbp, 64(%rdx)
+ adcq %r8, %r9
+ movq %r9, 72(%rdx)
+ adcq -32(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 80(%rdx)
+ adcq %r11, %rax
+ movq %rax, 88(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end82:
+ .size mcl_fpDbl_sqrPre6Lbmi2, .Lfunc_end82-mcl_fpDbl_sqrPre6Lbmi2
+
+ .globl mcl_fp_mont6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont6Lbmi2,@function
+mcl_fp_mont6Lbmi2: # @mcl_fp_mont6Lbmi2
+# BB#0:
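+# Word-serial Montgomery multiplication, 6 limbs: each multiplier word is
+# accumulated, the running low word is scaled by the constant at -8(%rcx)
+# (presumably -p^-1 mod 2^64) and that multiple of p is added to clear it;
+# a final conditional subtraction brings the result below p.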
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $32, %rsp
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq 40(%rsi), %rdi
+ movq %rdi, -40(%rsp) # 8-byte Spill
+ movq (%rdx), %rax
+ movq %rdi, %rdx
+ mulxq %rax, %r11, %r14
+ movq 32(%rsi), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ mulxq %rax, %r15, %rbx
+ movq 24(%rsi), %rdx
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rdi
+ movq %rdi, -72(%rsp) # 8-byte Spill
+ movq (%rsi), %rbp
+ movq %rbp, -56(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rsi
+ movq %rsi, -64(%rsp) # 8-byte Spill
+ mulxq %rax, %r8, %r12
+ movq %rdi, %rdx
+ mulxq %rax, %r9, %r10
+ movq %rsi, %rdx
+ mulxq %rax, %rdi, %r13
+ movq %rbp, %rdx
+ mulxq %rax, %rdx, %rbp
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ addq %rdi, %rbp
+ adcq %r9, %r13
+ adcq %r8, %r10
+ adcq %r15, %r12
+ adcq %r11, %rbx
+ movq %rbx, %rdi
+ adcq $0, %r14
+ movq %r14, -88(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ imulq %rax, %rdx
+ movq (%rcx), %rsi
+ movq %rsi, (%rsp) # 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ mulxq %rax, %rax, %r9
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ mulxq %rax, %r8, %r11
+ movq 8(%rcx), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ mulxq %rax, %rax, %r14
+ mulxq %rsi, %r15, %rsi
+ addq %rax, %rsi
+ adcq %r8, %r14
+ movq 24(%rcx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ mulxq %rax, %rbx, %r8
+ adcq %r11, %rbx
+ movq 32(%rcx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ mulxq %rax, %rax, %rcx
+ adcq %r8, %rax
+ adcq -112(%rsp), %rcx # 8-byte Folded Reload
+ adcq $0, %r9
+ addq -96(%rsp), %r15 # 8-byte Folded Reload
+ adcq %rbp, %rsi
+ adcq %r13, %r14
+ adcq %r10, %rbx
+ adcq %r12, %rax
+ adcq %rdi, %rcx
+ adcq -88(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, -96(%rsp) # 8-byte Spill
+ sbbq %r12, %r12
+ andl $1, %r12d
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq 8(%rdx), %rdx
+ mulxq -40(%rsp), %rdi, %rbp # 8-byte Folded Reload
+ movq %rdi, -112(%rsp) # 8-byte Spill
+ movq %rbp, -88(%rsp) # 8-byte Spill
+ mulxq -48(%rsp), %rdi, %r13 # 8-byte Folded Reload
+ movq %rdi, -120(%rsp) # 8-byte Spill
+ mulxq -80(%rsp), %rdi, %r15 # 8-byte Folded Reload
+ movq %rdi, -128(%rsp) # 8-byte Spill
+ mulxq -64(%rsp), %r8, %rdi # 8-byte Folded Reload
+ mulxq -56(%rsp), %rbp, %r10 # 8-byte Folded Reload
+ addq %r8, %r10
+ mulxq -72(%rsp), %r9, %r11 # 8-byte Folded Reload
+ adcq %rdi, %r9
+ adcq -128(%rsp), %r11 # 8-byte Folded Reload
+ adcq -120(%rsp), %r15 # 8-byte Folded Reload
+ adcq -112(%rsp), %r13 # 8-byte Folded Reload
+ movq -88(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %rsi, %rbp
+ adcq %r14, %r10
+ adcq %rbx, %r9
+ adcq %rax, %r11
+ adcq %rcx, %r15
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ adcq %r12, %rdx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rbp, %rdx
+ imulq -24(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 24(%rsp), %rax, %r12 # 8-byte Folded Reload
+ movq %rax, -112(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %rax, %r14 # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq 8(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ mulxq (%rsp), %rax, %r8 # 8-byte Folded Reload
+ addq %rcx, %r8
+ mulxq 16(%rsp), %rdi, %rbx # 8-byte Folded Reload
+ adcq %rsi, %rdi
+ mulxq -8(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ adcq %rbx, %rcx
+ adcq -120(%rsp), %rsi # 8-byte Folded Reload
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ adcq $0, %r12
+ addq %rbp, %rax
+ adcq %r10, %r8
+ adcq %r9, %rdi
+ adcq %r11, %rcx
+ adcq %r15, %rsi
+ adcq %r13, %r14
+ adcq -88(%rsp), %r12 # 8-byte Folded Reload
+ adcq $0, -96(%rsp) # 8-byte Folded Spill
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq -40(%rsp), %rbp, %rax # 8-byte Folded Reload
+ movq %rbp, -112(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ mulxq -48(%rsp), %rax, %r13 # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq -80(%rsp), %rbp, %r15 # 8-byte Folded Reload
+ mulxq -64(%rsp), %r9, %rbx # 8-byte Folded Reload
+ mulxq -56(%rsp), %rax, %r11 # 8-byte Folded Reload
+ addq %r9, %r11
+ mulxq -72(%rsp), %r9, %r10 # 8-byte Folded Reload
+ adcq %rbx, %r9
+ adcq %rbp, %r10
+ adcq -120(%rsp), %r15 # 8-byte Folded Reload
+ adcq -112(%rsp), %r13 # 8-byte Folded Reload
+ movq -88(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r8, %rax
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ adcq %rdi, %r11
+ adcq %rcx, %r9
+ adcq %rsi, %r10
+ adcq %r14, %r15
+ adcq %r12, %r13
+ adcq -96(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rbp, %rdx
+ imulq -24(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 24(%rsp), %rax, %r8 # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %r12, %r14 # 8-byte Folded Reload
+ mulxq 8(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ mulxq (%rsp), %rax, %rbx # 8-byte Folded Reload
+ addq %rcx, %rbx
+ mulxq 16(%rsp), %rbp, %rdi # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ mulxq -8(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ adcq %rdi, %rcx
+ adcq %r12, %rsi
+ adcq -120(%rsp), %r14 # 8-byte Folded Reload
+ adcq $0, %r8
+ addq -112(%rsp), %rax # 8-byte Folded Reload
+ adcq %r11, %rbx
+ adcq %r9, %rbp
+ adcq %r10, %rcx
+ adcq %r15, %rsi
+ adcq %r13, %r14
+ adcq -88(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, -112(%rsp) # 8-byte Spill
+ movq -96(%rsp), %r13 # 8-byte Reload
+ adcq $0, %r13
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq 24(%rdx), %rdx
+ mulxq -40(%rsp), %rdi, %rax # 8-byte Folded Reload
+ movq %rdi, -96(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ mulxq -48(%rsp), %rdi, %rax # 8-byte Folded Reload
+ movq %rdi, -120(%rsp) # 8-byte Spill
+ mulxq -80(%rsp), %r15, %r12 # 8-byte Folded Reload
+ mulxq -64(%rsp), %r8, %r11 # 8-byte Folded Reload
+ mulxq -56(%rsp), %r10, %rdi # 8-byte Folded Reload
+ addq %r8, %rdi
+ mulxq -72(%rsp), %r8, %r9 # 8-byte Folded Reload
+ adcq %r11, %r8
+ adcq %r15, %r9
+ adcq -120(%rsp), %r12 # 8-byte Folded Reload
+ adcq -96(%rsp), %rax # 8-byte Folded Reload
+ movq -88(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %rbx, %r10
+ adcq %rbp, %rdi
+ adcq %rcx, %r8
+ adcq %rsi, %r9
+ adcq %r14, %r12
+ adcq -112(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -96(%rsp) # 8-byte Spill
+ adcq %r13, %rdx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, %r14
+ movq %r10, %rdx
+ imulq -24(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 24(%rsp), %rax, %r13 # 8-byte Folded Reload
+ movq %rax, -112(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %rax, %r11 # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq 8(%rsp), %rbp, %rsi # 8-byte Folded Reload
+ mulxq (%rsp), %rcx, %rbx # 8-byte Folded Reload
+ addq %rbp, %rbx
+ mulxq 16(%rsp), %rbp, %rax # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ mulxq -8(%rsp), %rsi, %r15 # 8-byte Folded Reload
+ adcq %rax, %rsi
+ adcq -120(%rsp), %r15 # 8-byte Folded Reload
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %r10, %rcx
+ adcq %rdi, %rbx
+ adcq %r8, %rbp
+ adcq %r9, %rsi
+ adcq %r12, %r15
+ adcq -96(%rsp), %r11 # 8-byte Folded Reload
+ adcq -88(%rsp), %r13 # 8-byte Folded Reload
+ movq %r14, %rdi
+ adcq $0, %rdi
+ movq -32(%rsp), %rcx # 8-byte Reload
+ movq 32(%rcx), %rdx
+ mulxq -40(%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rcx, -96(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ mulxq -48(%rsp), %rax, %r12 # 8-byte Folded Reload
+ movq %rax, -112(%rsp) # 8-byte Spill
+ mulxq -80(%rsp), %rax, %r14 # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq -64(%rsp), %rcx, %r9 # 8-byte Folded Reload
+ mulxq -56(%rsp), %rax, %r8 # 8-byte Folded Reload
+ addq %rcx, %r8
+ mulxq -72(%rsp), %rcx, %r10 # 8-byte Folded Reload
+ adcq %r9, %rcx
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ adcq -96(%rsp), %r12 # 8-byte Folded Reload
+ movq -88(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %rbx, %rax
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rax, %rbx
+ adcq %rbp, %r8
+ adcq %rsi, %rcx
+ adcq %r15, %r10
+ adcq %r11, %r14
+ adcq %r13, %r12
+ adcq %rdi, %rdx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rbx, %rdx
+ imulq -24(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 24(%rsp), %rax, %r15 # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %r13, %r11 # 8-byte Folded Reload
+ mulxq 8(%rsp), %rsi, %rax # 8-byte Folded Reload
+ mulxq (%rsp), %rdi, %rbx # 8-byte Folded Reload
+ addq %rsi, %rbx
+ mulxq 16(%rsp), %rbp, %r9 # 8-byte Folded Reload
+ adcq %rax, %rbp
+ mulxq -8(%rsp), %rax, %rsi # 8-byte Folded Reload
+ adcq %r9, %rax
+ adcq %r13, %rsi
+ adcq -120(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r15
+ addq -112(%rsp), %rdi # 8-byte Folded Reload
+ adcq %r8, %rbx
+ adcq %rcx, %rbp
+ adcq %r10, %rax
+ adcq %r14, %rsi
+ adcq %r12, %r11
+ adcq -88(%rsp), %r15 # 8-byte Folded Reload
+ movq -96(%rsp), %r8 # 8-byte Reload
+ adcq $0, %r8
+ movq -32(%rsp), %rcx # 8-byte Reload
+ movq 40(%rcx), %rdx
+ mulxq -40(%rsp), %rdi, %rcx # 8-byte Folded Reload
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ mulxq -48(%rsp), %rdi, %rcx # 8-byte Folded Reload
+ movq %rdi, -48(%rsp) # 8-byte Spill
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ mulxq -80(%rsp), %rcx, %r14 # 8-byte Folded Reload
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ mulxq -72(%rsp), %rdi, %r12 # 8-byte Folded Reload
+ mulxq -64(%rsp), %rcx, %r10 # 8-byte Folded Reload
+ mulxq -56(%rsp), %r13, %r9 # 8-byte Folded Reload
+ addq %rcx, %r9
+ adcq %rdi, %r10
+ adcq -80(%rsp), %r12 # 8-byte Folded Reload
+ adcq -48(%rsp), %r14 # 8-byte Folded Reload
+ movq -40(%rsp), %rdx # 8-byte Reload
+ adcq -88(%rsp), %rdx # 8-byte Folded Reload
+ movq -32(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq %rbx, %r13
+ adcq %rbp, %r9
+ adcq %rax, %r10
+ adcq %rsi, %r12
+ adcq %r11, %r14
+ adcq %r15, %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ adcq %r8, %rcx
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ sbbq %rcx, %rcx
+ movq -24(%rsp), %rdx # 8-byte Reload
+ imulq %r13, %rdx
+ mulxq (%rsp), %r11, %rax # 8-byte Folded Reload
+ mulxq 8(%rsp), %rdi, %rbx # 8-byte Folded Reload
+ addq %rax, %rdi
+ mulxq 16(%rsp), %rsi, %rax # 8-byte Folded Reload
+ adcq %rbx, %rsi
+ mulxq -8(%rsp), %rbx, %rbp # 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -16(%rsp), %rax, %r15 # 8-byte Folded Reload
+ adcq %rbp, %rax
+ mulxq 24(%rsp), %rbp, %rdx # 8-byte Folded Reload
+ adcq %r15, %rbp
+ adcq $0, %rdx
+ andl $1, %ecx
+ addq %r13, %r11
+ adcq %r9, %rdi
+ adcq %r10, %rsi
+ adcq %r12, %rbx
+ adcq %r14, %rax
+ adcq -40(%rsp), %rbp # 8-byte Folded Reload
+ adcq -32(%rsp), %rdx # 8-byte Folded Reload
+ adcq $0, %rcx
+ movq %rdi, %r8
+ subq (%rsp), %r8 # 8-byte Folded Reload
+ movq %rsi, %r9
+ sbbq 8(%rsp), %r9 # 8-byte Folded Reload
+ movq %rbx, %r10
+ sbbq 16(%rsp), %r10 # 8-byte Folded Reload
+ movq %rax, %r11
+ sbbq -8(%rsp), %r11 # 8-byte Folded Reload
+ movq %rbp, %r14
+ sbbq -16(%rsp), %r14 # 8-byte Folded Reload
+ movq %rdx, %r15
+ sbbq 24(%rsp), %r15 # 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rax, %r11
+ testb %cl, %cl
+ cmovneq %rdi, %r8
+ movq -104(%rsp), %rax # 8-byte Reload
+ movq %r8, (%rax)
+ cmovneq %rsi, %r9
+ movq %r9, 8(%rax)
+ cmovneq %rbx, %r10
+ movq %r10, 16(%rax)
+ movq %r11, 24(%rax)
+ cmovneq %rbp, %r14
+ movq %r14, 32(%rax)
+ cmovneq %rdx, %r15
+ movq %r15, 40(%rax)
+ addq $32, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end83:
+ .size mcl_fp_mont6Lbmi2, .Lfunc_end83-mcl_fp_mont6Lbmi2
+
+ .globl mcl_fp_montNF6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF6Lbmi2,@function
+mcl_fp_montNF6Lbmi2: # @mcl_fp_montNF6Lbmi2
+# BB#0:
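+# Montgomery multiplication variant: the same interleaved reduction as above,
+# but the final correction selects result or result - p via the sign (cmovs)
+# instead of tracking an explicit carry word.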
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rdi, -120(%rsp) # 8-byte Spill
+ movq (%rsi), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rdi
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ movq (%rdx), %rbp
+ movq %rdi, %rdx
+ mulxq %rbp, %rdi, %rbx
+ movq %rax, %rdx
+ mulxq %rbp, %r9, %r14
+ movq 16(%rsi), %rdx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ addq %rdi, %r14
+ mulxq %rbp, %rdi, %r8
+ adcq %rbx, %rdi
+ movq 24(%rsi), %rdx
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ mulxq %rbp, %rbx, %r10
+ adcq %r8, %rbx
+ movq 32(%rsi), %rdx
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ mulxq %rbp, %r8, %r11
+ adcq %r10, %r8
+ movq 40(%rsi), %rdx
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ mulxq %rbp, %rsi, %r15
+ adcq %r11, %rsi
+ adcq $0, %r15
+ movq -8(%rcx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %r9, %rdx
+ imulq %rax, %rdx
+ movq (%rcx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ mulxq %rax, %rbp, %rax
+ movq %rax, -128(%rsp) # 8-byte Spill
+ addq %r9, %rbp
+ movq 8(%rcx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulxq %rax, %r12, %r9
+ adcq %r14, %r12
+ movq 16(%rcx), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ mulxq %rax, %r14, %rax
+ adcq %rdi, %r14
+ movq 24(%rcx), %rdi
+ movq %rdi, -40(%rsp) # 8-byte Spill
+ mulxq %rdi, %r13, %rdi
+ adcq %rbx, %r13
+ movq 32(%rcx), %rbp
+ movq %rbp, -48(%rsp) # 8-byte Spill
+ mulxq %rbp, %r11, %rbx
+ adcq %r8, %r11
+ movq 40(%rcx), %rcx
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ mulxq %rcx, %r10, %rcx
+ adcq %rsi, %r10
+ adcq $0, %r15
+ addq -128(%rsp), %r12 # 8-byte Folded Reload
+ adcq %r9, %r14
+ adcq %rax, %r13
+ adcq %rdi, %r11
+ adcq %rbx, %r10
+ adcq %rcx, %r15
+ movq -72(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ mulxq -80(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ mulxq -64(%rsp), %rbx, %rax # 8-byte Folded Reload
+ addq %rcx, %rax
+ mulxq -88(%rsp), %rcx, %rdi # 8-byte Folded Reload
+ adcq %rsi, %rcx
+ mulxq -96(%rsp), %rsi, %r8 # 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -104(%rsp), %rdi, %rbp # 8-byte Folded Reload
+ movq %rbp, -128(%rsp) # 8-byte Spill
+ adcq %r8, %rdi
+ mulxq -112(%rsp), %r8, %r9 # 8-byte Folded Reload
+ adcq -128(%rsp), %r8 # 8-byte Folded Reload
+ adcq $0, %r9
+ addq %r12, %rbx
+ adcq %r14, %rax
+ adcq %r13, %rcx
+ adcq %r11, %rsi
+ adcq %r10, %rdi
+ adcq %r15, %r8
+ adcq $0, %r9
+ movq %rbx, %rdx
+ imulq -16(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -8(%rsp), %rbp, %r13 # 8-byte Folded Reload
+ addq %rbx, %rbp
+ mulxq -24(%rsp), %r11, %rbx # 8-byte Folded Reload
+ adcq %rax, %r11
+ mulxq -32(%rsp), %r14, %rax # 8-byte Folded Reload
+ adcq %rcx, %r14
+ mulxq -40(%rsp), %r10, %rcx # 8-byte Folded Reload
+ adcq %rsi, %r10
+ mulxq -48(%rsp), %r15, %rsi # 8-byte Folded Reload
+ adcq %rdi, %r15
+ mulxq -56(%rsp), %r12, %rdx # 8-byte Folded Reload
+ adcq %r8, %r12
+ adcq $0, %r9
+ addq %r13, %r11
+ adcq %rbx, %r14
+ adcq %rax, %r10
+ adcq %rcx, %r15
+ adcq %rsi, %r12
+ adcq %rdx, %r9
+ movq -72(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ mulxq -80(%rsp), %rcx, %rax # 8-byte Folded Reload
+ mulxq -64(%rsp), %r13, %rdi # 8-byte Folded Reload
+ addq %rcx, %rdi
+ mulxq -88(%rsp), %rbx, %rcx # 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -96(%rsp), %rsi, %rbp # 8-byte Folded Reload
+ adcq %rcx, %rsi
+ mulxq -104(%rsp), %rax, %rcx # 8-byte Folded Reload
+ movq %rcx, -128(%rsp) # 8-byte Spill
+ adcq %rbp, %rax
+ mulxq -112(%rsp), %r8, %rcx # 8-byte Folded Reload
+ adcq -128(%rsp), %r8 # 8-byte Folded Reload
+ adcq $0, %rcx
+ addq %r11, %r13
+ adcq %r14, %rdi
+ adcq %r10, %rbx
+ adcq %r15, %rsi
+ adcq %r12, %rax
+ adcq %r9, %r8
+ adcq $0, %rcx
+ movq %r13, %rdx
+ imulq -16(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -8(%rsp), %rbp, %r12 # 8-byte Folded Reload
+ addq %r13, %rbp
+ mulxq -24(%rsp), %r11, %rbp # 8-byte Folded Reload
+ adcq %rdi, %r11
+ mulxq -32(%rsp), %r9, %rdi # 8-byte Folded Reload
+ adcq %rbx, %r9
+ mulxq -40(%rsp), %r10, %rbx # 8-byte Folded Reload
+ adcq %rsi, %r10
+ mulxq -48(%rsp), %r14, %rsi # 8-byte Folded Reload
+ adcq %rax, %r14
+ mulxq -56(%rsp), %r15, %rax # 8-byte Folded Reload
+ adcq %r8, %r15
+ adcq $0, %rcx
+ addq %r12, %r11
+ adcq %rbp, %r9
+ adcq %rdi, %r10
+ adcq %rbx, %r14
+ adcq %rsi, %r15
+ adcq %rax, %rcx
+ movq -72(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ mulxq -80(%rsp), %rsi, %rax # 8-byte Folded Reload
+ mulxq -64(%rsp), %r13, %rbx # 8-byte Folded Reload
+ addq %rsi, %rbx
+ mulxq -88(%rsp), %rdi, %rbp # 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq -96(%rsp), %rsi, %r8 # 8-byte Folded Reload
+ adcq %rbp, %rsi
+ mulxq -104(%rsp), %rax, %rbp # 8-byte Folded Reload
+ adcq %r8, %rax
+ mulxq -112(%rsp), %r8, %r12 # 8-byte Folded Reload
+ adcq %rbp, %r8
+ adcq $0, %r12
+ addq %r11, %r13
+ adcq %r9, %rbx
+ adcq %r10, %rdi
+ adcq %r14, %rsi
+ adcq %r15, %rax
+ adcq %rcx, %r8
+ adcq $0, %r12
+ movq %r13, %rdx
+ imulq -16(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -8(%rsp), %rbp, %rcx # 8-byte Folded Reload
+ addq %r13, %rbp
+ mulxq -24(%rsp), %r11, %rbp # 8-byte Folded Reload
+ adcq %rbx, %r11
+ mulxq -32(%rsp), %r9, %rbx # 8-byte Folded Reload
+ adcq %rdi, %r9
+ mulxq -40(%rsp), %r10, %rdi # 8-byte Folded Reload
+ adcq %rsi, %r10
+ mulxq -48(%rsp), %r14, %rsi # 8-byte Folded Reload
+ adcq %rax, %r14
+ mulxq -56(%rsp), %r15, %rax # 8-byte Folded Reload
+ adcq %r8, %r15
+ adcq $0, %r12
+ addq %rcx, %r11
+ adcq %rbp, %r9
+ adcq %rbx, %r10
+ adcq %rdi, %r14
+ adcq %rsi, %r15
+ adcq %rax, %r12
+ movq -72(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ mulxq -80(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ mulxq -64(%rsp), %r13, %rax # 8-byte Folded Reload
+ addq %rsi, %rax
+ mulxq -88(%rsp), %rbx, %rsi # 8-byte Folded Reload
+ adcq %rcx, %rbx
+ mulxq -96(%rsp), %rdi, %rcx # 8-byte Folded Reload
+ adcq %rsi, %rdi
+ mulxq -104(%rsp), %rsi, %rbp # 8-byte Folded Reload
+ adcq %rcx, %rsi
+ mulxq -112(%rsp), %r8, %rcx # 8-byte Folded Reload
+ adcq %rbp, %r8
+ adcq $0, %rcx
+ addq %r11, %r13
+ adcq %r9, %rax
+ adcq %r10, %rbx
+ adcq %r14, %rdi
+ adcq %r15, %rsi
+ adcq %r12, %r8
+ adcq $0, %rcx
+ movq %r13, %rdx
+ imulq -16(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -8(%rsp), %rbp, %r9 # 8-byte Folded Reload
+ addq %r13, %rbp
+ mulxq -24(%rsp), %r13, %rbp # 8-byte Folded Reload
+ adcq %rax, %r13
+ mulxq -32(%rsp), %r11, %rax # 8-byte Folded Reload
+ adcq %rbx, %r11
+ mulxq -40(%rsp), %r10, %rbx # 8-byte Folded Reload
+ adcq %rdi, %r10
+ mulxq -48(%rsp), %r14, %rdi # 8-byte Folded Reload
+ adcq %rsi, %r14
+ mulxq -56(%rsp), %rsi, %rdx # 8-byte Folded Reload
+ adcq %r8, %rsi
+ adcq $0, %rcx
+ addq %r9, %r13
+ adcq %rbp, %r11
+ adcq %rax, %r10
+ adcq %rbx, %r14
+ adcq %rdi, %rsi
+ adcq %rdx, %rcx
+ movq -72(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ mulxq -80(%rsp), %rdi, %rax # 8-byte Folded Reload
+ mulxq -64(%rsp), %r8, %rbx # 8-byte Folded Reload
+ addq %rdi, %rbx
+ mulxq -88(%rsp), %rdi, %rbp # 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq -96(%rsp), %r15, %rax # 8-byte Folded Reload
+ adcq %rbp, %r15
+ mulxq -104(%rsp), %r12, %rbp # 8-byte Folded Reload
+ adcq %rax, %r12
+ mulxq -112(%rsp), %r9, %rax # 8-byte Folded Reload
+ adcq %rbp, %r9
+ adcq $0, %rax
+ addq %r13, %r8
+ adcq %r11, %rbx
+ adcq %r10, %rdi
+ adcq %r14, %r15
+ adcq %rsi, %r12
+ adcq %rcx, %r9
+ adcq $0, %rax
+ movq -16(%rsp), %rdx # 8-byte Reload
+ imulq %r8, %rdx
+ mulxq -8(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ movq %rsi, -16(%rsp) # 8-byte Spill
+ addq %r8, %rcx
+ movq -24(%rsp), %r11 # 8-byte Reload
+ mulxq %r11, %r8, %rcx
+ movq %rcx, -64(%rsp) # 8-byte Spill
+ adcq %rbx, %r8
+ movq -32(%rsp), %r10 # 8-byte Reload
+ mulxq %r10, %rsi, %rcx
+ movq %rcx, -72(%rsp) # 8-byte Spill
+ adcq %rdi, %rsi
+ movq -40(%rsp), %r13 # 8-byte Reload
+ mulxq %r13, %rdi, %rcx
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ adcq %r15, %rdi
+ movq -48(%rsp), %rcx # 8-byte Reload
+ mulxq %rcx, %r15, %rbx
+ adcq %r12, %r15
+ movq -56(%rsp), %r14 # 8-byte Reload
+ mulxq %r14, %r12, %rbp
+ adcq %r9, %r12
+ adcq $0, %rax
+ addq -16(%rsp), %r8 # 8-byte Folded Reload
+ adcq -64(%rsp), %rsi # 8-byte Folded Reload
+ adcq -72(%rsp), %rdi # 8-byte Folded Reload
+ adcq -80(%rsp), %r15 # 8-byte Folded Reload
+ adcq %rbx, %r12
+ adcq %rbp, %rax
+ movq %r8, %rbp
+ subq -8(%rsp), %rbp # 8-byte Folded Reload
+ movq %rsi, %rbx
+ sbbq %r11, %rbx
+ movq %rdi, %r11
+ sbbq %r10, %r11
+ movq %r15, %r10
+ sbbq %r13, %r10
+ movq %r12, %r9
+ sbbq %rcx, %r9
+ movq %rax, %rcx
+ sbbq %r14, %rcx
+ movq %rcx, %rdx
+ sarq $63, %rdx
+ cmovsq %r8, %rbp
+ movq -120(%rsp), %rdx # 8-byte Reload
+ movq %rbp, (%rdx)
+ cmovsq %rsi, %rbx
+ movq %rbx, 8(%rdx)
+ cmovsq %rdi, %r11
+ movq %r11, 16(%rdx)
+ cmovsq %r15, %r10
+ movq %r10, 24(%rdx)
+ cmovsq %r12, %r9
+ movq %r9, 32(%rdx)
+ cmovsq %rax, %rcx
+ movq %rcx, 40(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end84:
+ .size mcl_fp_montNF6Lbmi2, .Lfunc_end84-mcl_fp_montNF6Lbmi2
+
+ .globl mcl_fp_montRed6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed6Lbmi2,@function
+mcl_fp_montRed6Lbmi2: # @mcl_fp_montRed6Lbmi2
+# BB#0:
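+# Montgomery reduction: folds the 12-limb input at (%rsi) down to 6 limbs by
+# repeatedly scaling the low word by the constant stored 8 bytes before the
+# modulus and adding the corresponding multiple of p, then conditionally
+# subtracting p once at the end.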
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ pushq %rax
+ movq %rdx, %rcx
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq (%rcx), %rdi
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq (%rsi), %r14
+ movq %r14, %rdx
+ imulq %rax, %rdx
+ movq 40(%rcx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulxq %rax, %rbx, %r12
+ movq 32(%rcx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ mulxq %rax, %r10, %r11
+ movq 24(%rcx), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rbp
+ movq %rbp, -40(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, (%rsp) # 8-byte Spill
+ mulxq %rax, %r15, %r8
+ mulxq %rbp, %r13, %rbp
+ mulxq %rcx, %rax, %r9
+ mulxq %rdi, %rdx, %rcx
+ addq %rax, %rcx
+ adcq %r13, %r9
+ adcq %r15, %rbp
+ adcq %r10, %r8
+ adcq %rbx, %r11
+ adcq $0, %r12
+ addq %r14, %rdx
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r9
+ adcq 24(%rsi), %rbp
+ adcq 32(%rsi), %r8
+ adcq 40(%rsi), %r11
+ movq %r11, -88(%rsp) # 8-byte Spill
+ adcq 48(%rsi), %r12
+ movq %r12, -80(%rsp) # 8-byte Spill
+ movq 88(%rsi), %r10
+ movq 80(%rsi), %rdx
+ movq 72(%rsi), %rdi
+ movq 64(%rsi), %rax
+ movq 56(%rsi), %rsi
+ adcq $0, %rsi
+ movq %rsi, -112(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -72(%rsp) # 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ adcq $0, %r10
+ movq %r10, -48(%rsp) # 8-byte Spill
+ sbbq %r12, %r12
+ andl $1, %r12d
+ movq %rcx, %rdx
+ imulq -56(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -24(%rsp), %rax, %r13 # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %rax, %r15 # 8-byte Folded Reload
+ movq %rax, -128(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %r11, %r14 # 8-byte Folded Reload
+ mulxq -40(%rsp), %rbx, %r10 # 8-byte Folded Reload
+ mulxq (%rsp), %rsi, %rdi # 8-byte Folded Reload
+ mulxq -8(%rsp), %rdx, %rax # 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %rbx, %rdi
+ adcq %r11, %r10
+ adcq -128(%rsp), %r14 # 8-byte Folded Reload
+ adcq -120(%rsp), %r15 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rcx, %rdx
+ adcq %r9, %rax
+ adcq %rbp, %rdi
+ adcq %r8, %r10
+ adcq -88(%rsp), %r14 # 8-byte Folded Reload
+ adcq -80(%rsp), %r15 # 8-byte Folded Reload
+ adcq -112(%rsp), %r13 # 8-byte Folded Reload
+ adcq $0, -96(%rsp) # 8-byte Folded Spill
+ adcq $0, -72(%rsp) # 8-byte Folded Spill
+ adcq $0, -64(%rsp) # 8-byte Folded Spill
+ adcq $0, -48(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rdx
+ movq -56(%rsp), %r11 # 8-byte Reload
+ imulq %r11, %rdx
+ mulxq -24(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -112(%rsp) # 8-byte Spill
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -120(%rsp) # 8-byte Spill
+ movq %rcx, -88(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rcx, %rbx # 8-byte Folded Reload
+ movq %rcx, -128(%rsp) # 8-byte Spill
+ mulxq -40(%rsp), %rcx, %r9 # 8-byte Folded Reload
+ mulxq (%rsp), %rsi, %rbp # 8-byte Folded Reload
+ mulxq -8(%rsp), %rdx, %r8 # 8-byte Folded Reload
+ addq %rsi, %r8
+ adcq %rcx, %rbp
+ adcq -128(%rsp), %r9 # 8-byte Folded Reload
+ adcq -120(%rsp), %rbx # 8-byte Folded Reload
+ movq -88(%rsp), %rsi # 8-byte Reload
+ adcq -112(%rsp), %rsi # 8-byte Folded Reload
+ movq -80(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq %rax, %rdx
+ adcq %rdi, %r8
+ adcq %r10, %rbp
+ adcq %r14, %r9
+ adcq %r15, %rbx
+ adcq %r13, %rsi
+ movq %rsi, -88(%rsp) # 8-byte Spill
+ adcq -96(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ adcq $0, -72(%rsp) # 8-byte Folded Spill
+ movq -64(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ adcq $0, -48(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %r8, %rdx
+ imulq %r11, %rdx
+ mulxq -24(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -64(%rsp) # 8-byte Spill
+ movq %rcx, -96(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %rcx, %r11 # 8-byte Folded Reload
+ movq %rcx, -112(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %r10, %r14 # 8-byte Folded Reload
+ mulxq -40(%rsp), %r13, %r15 # 8-byte Folded Reload
+ mulxq (%rsp), %rsi, %rdi # 8-byte Folded Reload
+ mulxq -8(%rsp), %rdx, %rcx # 8-byte Folded Reload
+ addq %rsi, %rcx
+ adcq %r13, %rdi
+ adcq %r10, %r15
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ adcq -64(%rsp), %r11 # 8-byte Folded Reload
+ movq -96(%rsp), %rsi # 8-byte Reload
+ adcq $0, %rsi
+ addq %r8, %rdx
+ adcq %rbp, %rcx
+ adcq %r9, %rdi
+ adcq %rbx, %r15
+ adcq -88(%rsp), %r14 # 8-byte Folded Reload
+ adcq -80(%rsp), %r11 # 8-byte Folded Reload
+ adcq -72(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -96(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq -48(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ adcq $0, %r12
+ movq %rcx, %rdx
+ imulq -56(%rsp), %rdx # 8-byte Folded Reload
+ mulxq -24(%rsp), %rbp, %rsi # 8-byte Folded Reload
+ movq %rbp, -48(%rsp) # 8-byte Spill
+ movq %rsi, -72(%rsp) # 8-byte Spill
+ mulxq -16(%rsp), %rbp, %rsi # 8-byte Folded Reload
+ movq %rbp, -88(%rsp) # 8-byte Spill
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rsi, %r13 # 8-byte Folded Reload
+ movq %rsi, -112(%rsp) # 8-byte Spill
+ movq -40(%rsp), %r9 # 8-byte Reload
+ mulxq %r9, %r10, %rbp
+ mulxq (%rsp), %rsi, %r8 # 8-byte Folded Reload
+ mulxq -8(%rsp), %rdx, %rbx # 8-byte Folded Reload
+ addq %rsi, %rbx
+ adcq %r10, %r8
+ adcq -112(%rsp), %rbp # 8-byte Folded Reload
+ adcq -88(%rsp), %r13 # 8-byte Folded Reload
+ movq -80(%rsp), %r10 # 8-byte Reload
+ adcq -48(%rsp), %r10 # 8-byte Folded Reload
+ movq -72(%rsp), %rsi # 8-byte Reload
+ adcq $0, %rsi
+ addq %rcx, %rdx
+ adcq %rdi, %rbx
+ adcq %r15, %r8
+ adcq %r14, %rbp
+ adcq %r11, %r13
+ adcq -96(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, -80(%rsp) # 8-byte Spill
+ adcq -64(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -72(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ adcq $0, %r12
+ movq -56(%rsp), %rdx # 8-byte Reload
+ imulq %rbx, %rdx
+ mulxq -24(%rsp), %rax, %r10 # 8-byte Folded Reload
+ movq %rax, -56(%rsp) # 8-byte Spill
+ mulxq %r9, %rsi, %r14
+ mulxq -8(%rsp), %r11, %rdi # 8-byte Folded Reload
+ mulxq (%rsp), %rax, %r9 # 8-byte Folded Reload
+ addq %rdi, %rax
+ adcq %rsi, %r9
+ movq -32(%rsp), %r15 # 8-byte Reload
+ mulxq %r15, %rsi, %rdi
+ adcq %r14, %rsi
+ mulxq -16(%rsp), %rdx, %r14 # 8-byte Folded Reload
+ adcq %rdi, %rdx
+ adcq -56(%rsp), %r14 # 8-byte Folded Reload
+ adcq $0, %r10
+ addq %rbx, %r11
+ adcq %r8, %rax
+ adcq %rbp, %r9
+ adcq %r13, %rsi
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ adcq -72(%rsp), %r14 # 8-byte Folded Reload
+ adcq -48(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r12
+ movq %rax, %rcx
+ subq -8(%rsp), %rcx # 8-byte Folded Reload
+ movq %r9, %rdi
+ sbbq (%rsp), %rdi # 8-byte Folded Reload
+ movq %rsi, %rbp
+ sbbq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rdx, %rbx
+ sbbq %r15, %rbx
+ movq %r14, %r8
+ sbbq -16(%rsp), %r8 # 8-byte Folded Reload
+ movq %r10, %r15
+ sbbq -24(%rsp), %r15 # 8-byte Folded Reload
+ sbbq $0, %r12
+ andl $1, %r12d
+ cmovneq %r10, %r15
+ testb %r12b, %r12b
+ cmovneq %rax, %rcx
+ movq -104(%rsp), %rax # 8-byte Reload
+ movq %rcx, (%rax)
+ cmovneq %r9, %rdi
+ movq %rdi, 8(%rax)
+ cmovneq %rsi, %rbp
+ movq %rbp, 16(%rax)
+ cmovneq %rdx, %rbx
+ movq %rbx, 24(%rax)
+ cmovneq %r14, %r8
+ movq %r8, 32(%rax)
+ movq %r15, 40(%rax)
+ addq $8, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end85:
+ .size mcl_fp_montRed6Lbmi2, .Lfunc_end85-mcl_fp_montRed6Lbmi2
+
+ .globl mcl_fp_addPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre6Lbmi2,@function
+mcl_fp_addPre6Lbmi2: # @mcl_fp_addPre6Lbmi2
+# BB#0:
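+# Plain 6-limb addition (no reduction): sum stored to (%rdi), carry-out
+# returned in %rax as 0 or 1.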
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 40(%rsi), %r11
+ movq 32(%rdx), %r9
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %r14
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r10, %rax
+ movq %rax, 24(%rdi)
+ adcq %r9, %r14
+ movq %r14, 32(%rdi)
+ adcq %r8, %r11
+ movq %r11, 40(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end86:
+ .size mcl_fp_addPre6Lbmi2, .Lfunc_end86-mcl_fp_addPre6Lbmi2
+
+ .globl mcl_fp_subPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre6Lbmi2,@function
+mcl_fp_subPre6Lbmi2: # @mcl_fp_subPre6Lbmi2
+# BB#0:
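+# Plain 6-limb subtraction (no reduction): difference stored to (%rdi),
+# borrow returned in %rax as 0 or 1.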
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 40(%rsi), %r9
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rsi
+ movq 24(%rdx), %r14
+ movq 32(%rdx), %r15
+ sbbq 16(%rdx), %rcx
+ movq %rbx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r14, %r11
+ movq %r11, 24(%rdi)
+ sbbq %r15, %r10
+ movq %r10, 32(%rdi)
+ sbbq %r8, %r9
+ movq %r9, 40(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end87:
+ .size mcl_fp_subPre6Lbmi2, .Lfunc_end87-mcl_fp_subPre6Lbmi2
+
+ .globl mcl_fp_shr1_6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_6Lbmi2,@function
+mcl_fp_shr1_6Lbmi2: # @mcl_fp_shr1_6Lbmi2
+# BB#0:
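+# Logical right shift by one bit of a 6-limb value: shrd moves the low bit
+# of each limb into the top bit of the limb below it.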
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %rdx
+ movq 16(%rsi), %rax
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rcx
+ movq %rcx, (%rdi)
+ shrdq $1, %rax, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rdx, %rax
+ movq %rax, 16(%rdi)
+ shrdq $1, %r9, %rdx
+ movq %rdx, 24(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 32(%rdi)
+ shrq %r8
+ movq %r8, 40(%rdi)
+ retq
+.Lfunc_end88:
+ .size mcl_fp_shr1_6Lbmi2, .Lfunc_end88-mcl_fp_shr1_6Lbmi2
+
+ .globl mcl_fp_add6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add6Lbmi2,@function
+mcl_fp_add6Lbmi2: # @mcl_fp_add6Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r14
+ movq 40(%rsi), %r8
+ movq 32(%rdx), %r15
+ movq 24(%rdx), %rbx
+ movq 24(%rsi), %r10
+ movq 32(%rsi), %r9
+ movq 16(%rdx), %r11
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r11
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r11, 16(%rdi)
+ adcq %rbx, %r10
+ movq %r10, 24(%rdi)
+ adcq %r15, %r9
+ movq %r9, 32(%rdi)
+ adcq %r14, %r8
+ movq %r8, 40(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r11
+ sbbq 24(%rcx), %r10
+ sbbq 32(%rcx), %r9
+ sbbq 40(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB89_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r11, 16(%rdi)
+ movq %r10, 24(%rdi)
+ movq %r9, 32(%rdi)
+ movq %r8, 40(%rdi)
+.LBB89_2: # %carry
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end89:
+ .size mcl_fp_add6Lbmi2, .Lfunc_end89-mcl_fp_add6Lbmi2
+
+ .globl mcl_fp_addNF6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF6Lbmi2,@function
+mcl_fp_addNF6Lbmi2: # @mcl_fp_addNF6Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 32(%rdx), %r9
+ movq 24(%rdx), %r10
+ movq 16(%rdx), %r11
+ movq (%rdx), %r15
+ movq 8(%rdx), %r14
+ addq (%rsi), %r15
+ adcq 8(%rsi), %r14
+ adcq 16(%rsi), %r11
+ adcq 24(%rsi), %r10
+ adcq 32(%rsi), %r9
+ adcq 40(%rsi), %r8
+ movq %r15, %rsi
+ subq (%rcx), %rsi
+ movq %r14, %rbx
+ sbbq 8(%rcx), %rbx
+ movq %r11, %rdx
+ sbbq 16(%rcx), %rdx
+ movq %r10, %r13
+ sbbq 24(%rcx), %r13
+ movq %r9, %r12
+ sbbq 32(%rcx), %r12
+ movq %r8, %rax
+ sbbq 40(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r15, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r14, %rbx
+ movq %rbx, 8(%rdi)
+ cmovsq %r11, %rdx
+ movq %rdx, 16(%rdi)
+ cmovsq %r10, %r13
+ movq %r13, 24(%rdi)
+ cmovsq %r9, %r12
+ movq %r12, 32(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end90:
+ .size mcl_fp_addNF6Lbmi2, .Lfunc_end90-mcl_fp_addNF6Lbmi2
+
+ .globl mcl_fp_sub6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub6Lbmi2,@function
+mcl_fp_sub6Lbmi2: # @mcl_fp_sub6Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r14
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r10
+ movq 16(%rsi), %r11
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %rsi
+ movq 24(%rdx), %r15
+ movq 32(%rdx), %r12
+ sbbq 16(%rdx), %r11
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r11, 16(%rdi)
+ sbbq %r15, %r10
+ movq %r10, 24(%rdi)
+ sbbq %r12, %r9
+ movq %r9, 32(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 40(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB91_2
+# BB#1: # %carry
+ movq 40(%rcx), %r14
+ movq 32(%rcx), %r15
+ movq 24(%rcx), %r12
+ movq 8(%rcx), %rbx
+ movq 16(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %rsi, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r11, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r10, %r12
+ movq %r12, 24(%rdi)
+ adcq %r9, %r15
+ movq %r15, 32(%rdi)
+ adcq %r8, %r14
+ movq %r14, 40(%rdi)
+.LBB91_2: # %nocarry
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end91:
+ .size mcl_fp_sub6Lbmi2, .Lfunc_end91-mcl_fp_sub6Lbmi2
+
+ .globl mcl_fp_subNF6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF6Lbmi2,@function
+mcl_fp_subNF6Lbmi2: # @mcl_fp_subNF6Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 40(%rsi), %r15
+ movq 32(%rsi), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %r11
+ movq 8(%rsi), %r14
+ subq (%rdx), %r11
+ sbbq 8(%rdx), %r14
+ sbbq 16(%rdx), %r10
+ sbbq 24(%rdx), %r9
+ sbbq 32(%rdx), %r8
+ sbbq 40(%rdx), %r15
+ movq %r15, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rbx
+ addq %rbx, %rbx
+ movq %rdx, %rsi
+ adcq %rsi, %rsi
+ andq 8(%rcx), %rsi
+ movq %r15, %rax
+ shrq $63, %rax
+ orq %rbx, %rax
+ andq (%rcx), %rax
+ movq 40(%rcx), %r12
+ andq %rdx, %r12
+ movq 32(%rcx), %r13
+ andq %rdx, %r13
+ movq 24(%rcx), %rbx
+ andq %rdx, %rbx
+ andq 16(%rcx), %rdx
+ addq %r11, %rax
+ movq %rax, (%rdi)
+ adcq %r14, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r10, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r9, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r8, %r13
+ movq %r13, 32(%rdi)
+ adcq %r15, %r12
+ movq %r12, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end92:
+ .size mcl_fp_subNF6Lbmi2, .Lfunc_end92-mcl_fp_subNF6Lbmi2
+
+ .globl mcl_fpDbl_add6Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add6Lbmi2,@function
+mcl_fpDbl_add6Lbmi2: # @mcl_fpDbl_add6Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 88(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 80(%rdx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq 72(%rdx), %r14
+ movq 64(%rdx), %r15
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %r13
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %rbp
+ adcq 32(%rdx), %r13
+ movq 56(%rdx), %r11
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rdx
+ movq %rbx, (%rdi)
+ movq 88(%rsi), %r8
+ movq %rax, 8(%rdi)
+ movq 80(%rsi), %r10
+ movq %r12, 16(%rdi)
+ movq 72(%rsi), %r12
+ movq %rbp, 24(%rdi)
+ movq 40(%rsi), %rax
+ adcq %rdx, %rax
+ movq 64(%rsi), %rdx
+ movq %r13, 32(%rdi)
+ movq 56(%rsi), %r13
+ movq 48(%rsi), %rbp
+ adcq %r9, %rbp
+ movq %rax, 40(%rdi)
+ adcq %r11, %r13
+ adcq %r15, %rdx
+ adcq %r14, %r12
+ adcq -16(%rsp), %r10 # 8-byte Folded Reload
+ adcq -8(%rsp), %r8 # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rbp, %rsi
+ subq (%rcx), %rsi
+ movq %r13, %rbx
+ sbbq 8(%rcx), %rbx
+ movq %rdx, %r9
+ sbbq 16(%rcx), %r9
+ movq %r12, %r11
+ sbbq 24(%rcx), %r11
+ movq %r10, %r14
+ sbbq 32(%rcx), %r14
+ movq %r8, %r15
+ sbbq 40(%rcx), %r15
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rbp, %rsi
+ movq %rsi, 48(%rdi)
+ testb %al, %al
+ cmovneq %r13, %rbx
+ movq %rbx, 56(%rdi)
+ cmovneq %rdx, %r9
+ movq %r9, 64(%rdi)
+ cmovneq %r12, %r11
+ movq %r11, 72(%rdi)
+ cmovneq %r10, %r14
+ movq %r14, 80(%rdi)
+ cmovneq %r8, %r15
+ movq %r15, 88(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end93:
+ .size mcl_fpDbl_add6Lbmi2, .Lfunc_end93-mcl_fpDbl_add6Lbmi2
+
+ .globl mcl_fpDbl_sub6Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub6Lbmi2,@function
+mcl_fpDbl_sub6Lbmi2: # @mcl_fpDbl_sub6Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 88(%rdx), %r9
+ movq 80(%rdx), %r10
+ movq 72(%rdx), %r14
+ movq 16(%rsi), %r8
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %eax, %eax
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r8
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 64(%rdx), %r13
+ movq %r15, (%rdi)
+ movq 56(%rdx), %rbp
+ movq %r11, 8(%rdi)
+ movq 48(%rdx), %r15
+ movq 40(%rdx), %rdx
+ movq %r8, 16(%rdi)
+ movq 88(%rsi), %r8
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %rdx, %rbx
+ movq 80(%rsi), %r11
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %rdx
+ sbbq %r15, %rdx
+ movq 72(%rsi), %r15
+ movq %rbx, 40(%rdi)
+ movq 64(%rsi), %r12
+ movq 56(%rsi), %rsi
+ sbbq %rbp, %rsi
+ sbbq %r13, %r12
+ sbbq %r14, %r15
+ sbbq %r10, %r11
+ sbbq %r9, %r8
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%rcx), %r14
+ cmoveq %rax, %r14
+ testb %bpl, %bpl
+ movq 16(%rcx), %r9
+ cmoveq %rax, %r9
+ movq 8(%rcx), %rbp
+ cmoveq %rax, %rbp
+ movq 40(%rcx), %r10
+ cmoveq %rax, %r10
+ movq 32(%rcx), %rbx
+ cmoveq %rax, %rbx
+ cmovneq 24(%rcx), %rax
+ addq %rdx, %r14
+ movq %r14, 48(%rdi)
+ adcq %rsi, %rbp
+ movq %rbp, 56(%rdi)
+ adcq %r12, %r9
+ movq %r9, 64(%rdi)
+ adcq %r15, %rax
+ movq %rax, 72(%rdi)
+ adcq %r11, %rbx
+ movq %rbx, 80(%rdi)
+ adcq %r8, %r10
+ movq %r10, 88(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end94:
+ .size mcl_fpDbl_sub6Lbmi2, .Lfunc_end94-mcl_fpDbl_sub6Lbmi2
+
+ .globl mcl_fp_mulUnitPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre7Lbmi2,@function
+mcl_fp_mulUnitPre7Lbmi2: # @mcl_fp_mulUnitPre7Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ mulxq 48(%rsi), %r8, %r11
+ mulxq 40(%rsi), %r9, %r13
+ mulxq 32(%rsi), %r10, %rcx
+ mulxq 8(%rsi), %r12, %r14
+ mulxq (%rsi), %r15, %rbx
+ addq %r12, %rbx
+ mulxq 24(%rsi), %r12, %rax
+ mulxq 16(%rsi), %rdx, %rsi
+ movq %r15, (%rdi)
+ movq %rbx, 8(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r12, %rsi
+ movq %rsi, 24(%rdi)
+ adcq %r10, %rax
+ movq %rax, 32(%rdi)
+ adcq %r9, %rcx
+ movq %rcx, 40(%rdi)
+ adcq %r8, %r13
+ movq %r13, 48(%rdi)
+ adcq $0, %r11
+ movq %r11, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end95:
+ .size mcl_fp_mulUnitPre7Lbmi2, .Lfunc_end95-mcl_fp_mulUnitPre7Lbmi2
+
+ .globl mcl_fpDbl_mulPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre7Lbmi2,@function
+mcl_fpDbl_mulPre7Lbmi2: # @mcl_fpDbl_mulPre7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r14
+ movq %r14, -8(%rsp) # 8-byte Spill
+ movq %rsi, %r8
+ movq %rdi, %r13
+ movq %r13, -16(%rsp) # 8-byte Spill
+ movq (%r8), %rcx
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ movq 8(%r8), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq (%r14), %rsi
+ movq %rax, %rdx
+ mulxq %rsi, %rbp, %rax
+ movq %rcx, %rdx
+ mulxq %rsi, %rdx, %rcx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq 24(%r8), %rdi
+ movq %rdi, -32(%rsp) # 8-byte Spill
+ movq 16(%r8), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ addq %rbp, %rcx
+ mulxq %rsi, %rbx, %rbp
+ adcq %rax, %rbx
+ movq %rdi, %rdx
+ mulxq %rsi, %r12, %rax
+ adcq %rbp, %r12
+ movq 32(%r8), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ mulxq %rsi, %r9, %rbp
+ adcq %rax, %r9
+ movq 40(%r8), %rdi
+ movq %rdi, %rdx
+ mulxq %rsi, %r10, %rax
+ adcq %rbp, %r10
+ movq 48(%r8), %r15
+ movq %r15, %rdx
+ mulxq %rsi, %rsi, %r11
+ adcq %rax, %rsi
+ movq -64(%rsp), %rax # 8-byte Reload
+ movq %rax, (%r13)
+ adcq $0, %r11
+ movq 8(%r14), %r13
+ movq -56(%rsp), %rdx # 8-byte Reload
+ mulxq %r13, %r14, %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ addq %rcx, %r14
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %r13, %rcx, %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ adcq %rbx, %rcx
+ movq -40(%rsp), %rdx # 8-byte Reload
+ mulxq %r13, %rbx, %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ adcq %r12, %rbx
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %r13, %rbp, %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ adcq %r9, %rbp
+ movq -48(%rsp), %rdx # 8-byte Reload
+ mulxq %r13, %rax, %r9
+ adcq %r10, %rax
+ movq %rdi, %rdx
+ mulxq %r13, %r10, %rdi
+ adcq %rsi, %r10
+ movq %r15, %rdx
+ mulxq %r13, %r13, %rdx
+ adcq %r11, %r13
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -56(%rsp), %rcx # 8-byte Folded Reload
+ adcq -24(%rsp), %rbx # 8-byte Folded Reload
+ adcq -40(%rsp), %rbp # 8-byte Folded Reload
+ adcq -32(%rsp), %rax # 8-byte Folded Reload
+ adcq %r9, %r10
+ movq -16(%rsp), %rsi # 8-byte Reload
+ movq %r14, 8(%rsi)
+ adcq %rdi, %r13
+ adcq %rdx, %r12
+ movq (%r8), %rsi
+ movq %rsi, -32(%rsp) # 8-byte Spill
+ movq 8(%r8), %r11
+ movq %r11, -24(%rsp) # 8-byte Spill
+ movq -8(%rsp), %rdx # 8-byte Reload
+ movq 16(%rdx), %rdi
+ movq %rsi, %rdx
+ mulxq %rdi, %r9, %rdx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ addq %rcx, %r9
+ movq %r11, %rdx
+ mulxq %rdi, %r14, %rcx
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ adcq %rbx, %r14
+ movq 16(%r8), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ mulxq %rdi, %rsi, %rcx
+ movq %rcx, -88(%rsp) # 8-byte Spill
+ adcq %rbp, %rsi
+ movq 24(%r8), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ mulxq %rdi, %rbp, %rcx
+ movq %rcx, -96(%rsp) # 8-byte Spill
+ adcq %rax, %rbp
+ movq 32(%r8), %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ mulxq %rdi, %r11, %rax
+ movq %rax, -104(%rsp) # 8-byte Spill
+ adcq %r10, %r11
+ movq 40(%r8), %rdx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ mulxq %rdi, %r15, %rax
+ adcq %r13, %r15
+ movq 48(%r8), %r13
+ movq %r13, %rdx
+ mulxq %rdi, %rcx, %rdx
+ adcq %r12, %rcx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq -72(%rsp), %r14 # 8-byte Folded Reload
+ adcq -80(%rsp), %rsi # 8-byte Folded Reload
+ adcq -88(%rsp), %rbp # 8-byte Folded Reload
+ adcq -96(%rsp), %r11 # 8-byte Folded Reload
+ adcq -104(%rsp), %r15 # 8-byte Folded Reload
+ adcq %rax, %rcx
+ adcq %rdx, %rbx
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq %r9, 16(%rax)
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdi
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r9, %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ addq %r14, %r9
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rax, %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq %rsi, %rax
+ movq -40(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r14, %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ adcq %rbp, %r14
+ movq -48(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r10, %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ adcq %r11, %r10
+ movq -56(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rbp, %rsi
+ adcq %r15, %rbp
+ movq -64(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r11, %r15
+ adcq %rcx, %r11
+ movq %r13, %rdx
+ mulxq %rdi, %r13, %rcx
+ adcq %rbx, %r13
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -32(%rsp), %rax # 8-byte Folded Reload
+ adcq -24(%rsp), %r14 # 8-byte Folded Reload
+ adcq -40(%rsp), %r10 # 8-byte Folded Reload
+ adcq -48(%rsp), %rbp # 8-byte Folded Reload
+ adcq %rsi, %r11
+ movq -16(%rsp), %rdi # 8-byte Reload
+ movq %r9, 24(%rdi)
+ adcq %r15, %r13
+ adcq %rcx, %r12
+ movq (%r8), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 8(%r8), %rbx
+ movq %rbx, -24(%rsp) # 8-byte Spill
+ movq -8(%rsp), %rcx # 8-byte Reload
+ movq 32(%rcx), %rcx
+ mulxq %rcx, %rsi, %rdx
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ addq %rax, %rsi
+ movq %rbx, %rdx
+ mulxq %rcx, %r9, %rax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ adcq %r14, %r9
+ movq 16(%r8), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ mulxq %rcx, %rax, %rdx
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ adcq %r10, %rax
+ movq 24(%r8), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ mulxq %rcx, %r15, %rdx
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ adcq %rbp, %r15
+ movq 32(%r8), %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ mulxq %rcx, %r10, %rbp
+ adcq %r11, %r10
+ movq 40(%r8), %rdx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ mulxq %rcx, %r11, %rbx
+ adcq %r13, %r11
+ movq 48(%r8), %rdx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ mulxq %rcx, %r14, %rcx
+ adcq %r12, %r14
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -80(%rsp), %r9 # 8-byte Folded Reload
+ adcq -88(%rsp), %rax # 8-byte Folded Reload
+ adcq -96(%rsp), %r15 # 8-byte Folded Reload
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ adcq %rbp, %r11
+ adcq %rbx, %r14
+ adcq %rcx, %r12
+ movq %rsi, 32(%rdi)
+ movq -8(%rsp), %rsi # 8-byte Reload
+ movq 40(%rsi), %rdi
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r13, %rcx
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ addq %r9, %r13
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rcx, %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq %rax, %rcx
+ movq -40(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rax, %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ adcq %r15, %rax
+ movq -48(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rbx, %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ adcq %r10, %rbx
+ movq -56(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %rbp, %r15
+ adcq %r11, %rbp
+ movq -64(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r9, %r11
+ adcq %r14, %r9
+ movq -72(%rsp), %rdx # 8-byte Reload
+ mulxq %rdi, %r10, %rdx
+ adcq %r12, %r10
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ addq -32(%rsp), %rcx # 8-byte Folded Reload
+ adcq -24(%rsp), %rax # 8-byte Folded Reload
+ adcq -40(%rsp), %rbx # 8-byte Folded Reload
+ adcq -48(%rsp), %rbp # 8-byte Folded Reload
+ adcq %r15, %r9
+ movq -16(%rsp), %r14 # 8-byte Reload
+ movq %r13, 40(%r14)
+ adcq %r11, %r10
+ adcq %rdx, %rdi
+ movq 48(%rsi), %rdx
+ mulxq (%r8), %r11, %rsi
+ movq %rsi, -8(%rsp) # 8-byte Spill
+ addq %rcx, %r11
+ mulxq 8(%r8), %rsi, %r15
+ adcq %rax, %rsi
+ mulxq 16(%r8), %rcx, %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ adcq %rbx, %rcx
+ mulxq 24(%r8), %rbx, %r12
+ adcq %rbp, %rbx
+ mulxq 32(%r8), %rbp, %r13
+ adcq %r9, %rbp
+ mulxq 40(%r8), %rax, %r9
+ adcq %r10, %rax
+ mulxq 48(%r8), %rdx, %r8
+ adcq %rdi, %rdx
+ sbbq %r10, %r10
+ andl $1, %r10d
+ addq -8(%rsp), %rsi # 8-byte Folded Reload
+ adcq %r15, %rcx
+ movq %r11, 48(%r14)
+ movq %rsi, 56(%r14)
+ movq %rcx, 64(%r14)
+ adcq -24(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 72(%r14)
+ adcq %r12, %rbp
+ movq %rbp, 80(%r14)
+ adcq %r13, %rax
+ movq %rax, 88(%r14)
+ adcq %r9, %rdx
+ movq %rdx, 96(%r14)
+ adcq %r8, %r10
+ movq %r10, 104(%r14)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end96:
+ .size mcl_fpDbl_mulPre7Lbmi2, .Lfunc_end96-mcl_fpDbl_mulPre7Lbmi2
+
+ .globl mcl_fpDbl_sqrPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre7Lbmi2,@function
+mcl_fpDbl_sqrPre7Lbmi2: # @mcl_fpDbl_sqrPre7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rax
+ mulxq %rcx, %r8, %r10
+ movq 24(%rsi), %rbx
+ movq %rbx, -32(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ mulxq %rcx, %r12, %rbp
+ movq %rbp, -16(%rsp) # 8-byte Spill
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %rdi
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ addq %r12, %rdi
+ adcq %rbp, %r8
+ movq %rbx, %rdx
+ mulxq %rcx, %rbp, %r9
+ adcq %r10, %rbp
+ movq 32(%rsi), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ mulxq %rcx, %r11, %r14
+ adcq %r9, %r11
+ movq 40(%rsi), %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ mulxq %rcx, %r10, %r15
+ adcq %r14, %r10
+ movq 48(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rcx, %rcx, %r13
+ adcq %r15, %rcx
+ movq -8(%rsp), %rdx # 8-byte Reload
+ movq -48(%rsp), %rbx # 8-byte Reload
+ movq %rbx, (%rdx)
+ adcq $0, %r13
+ addq %r12, %rdi
+ movq %rax, %rdx
+ mulxq %rax, %r12, %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ adcq %r8, %r12
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r8, %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq %rbp, %r8
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r9, %rbp
+ adcq %r11, %r9
+ movq -40(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r15, %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ adcq %r10, %r15
+ movq -56(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r11, %rbx
+ adcq %rcx, %r11
+ movq %r14, %rdx
+ mulxq %rax, %r14, %rax
+ adcq %r13, %r14
+ sbbq %r13, %r13
+ andl $1, %r13d
+ addq -16(%rsp), %r12 # 8-byte Folded Reload
+ adcq -48(%rsp), %r8 # 8-byte Folded Reload
+ adcq -24(%rsp), %r9 # 8-byte Folded Reload
+ adcq %rbp, %r15
+ movq -8(%rsp), %rcx # 8-byte Reload
+ movq %rdi, 8(%rcx)
+ adcq -32(%rsp), %r11 # 8-byte Folded Reload
+ adcq %rbx, %r14
+ adcq %rax, %r13
+ movq (%rsi), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rbx
+ mulxq %rbx, %rax, %rdx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ addq %r12, %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rcx, %rdx
+ mulxq %rbx, %r10, %rax
+ movq %rax, -72(%rsp) # 8-byte Spill
+ adcq %r8, %r10
+ movq %rbx, %rdx
+ mulxq %rbx, %r12, %rax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ adcq %r9, %r12
+ movq 24(%rsi), %rax
+ movq %rax, %rdx
+ mulxq %rbx, %r8, %rdi
+ movq %rdi, -56(%rsp) # 8-byte Spill
+ adcq %r8, %r15
+ movq 32(%rsi), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ mulxq %rbx, %rcx, %rdx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ adcq %r11, %rcx
+ movq 40(%rsi), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ mulxq %rbx, %rbp, %r11
+ adcq %r14, %rbp
+ movq 48(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rbx, %r9, %rdx
+ adcq %r13, %r9
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq -64(%rsp), %r10 # 8-byte Folded Reload
+ adcq -72(%rsp), %r12 # 8-byte Folded Reload
+ adcq -80(%rsp), %r15 # 8-byte Folded Reload
+ adcq %rdi, %rcx
+ adcq -88(%rsp), %rbp # 8-byte Folded Reload
+ adcq %r11, %r9
+ adcq %rdx, %rbx
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %rdi, %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ addq %r10, %rdi
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r11, %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq %r12, %r11
+ adcq %r8, %r15
+ movq %rax, %rdx
+ mulxq %rax, %r8, %rdx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ adcq %rcx, %r8
+ movq -40(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r13, %rcx
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ adcq %rbp, %r13
+ movq -48(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r12, %rbp
+ adcq %r9, %r12
+ movq %r14, %rdx
+ mulxq %rax, %rcx, %rax
+ adcq %rbx, %rcx
+ sbbq %r10, %r10
+ andl $1, %r10d
+ addq -32(%rsp), %r11 # 8-byte Folded Reload
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ adcq -56(%rsp), %r8 # 8-byte Folded Reload
+ adcq -64(%rsp), %r13 # 8-byte Folded Reload
+ movq -8(%rsp), %rdx # 8-byte Reload
+ movq -16(%rsp), %rbx # 8-byte Reload
+ movq %rbx, 16(%rdx)
+ movq %rdi, 24(%rdx)
+ adcq -40(%rsp), %r12 # 8-byte Folded Reload
+ adcq %rbp, %rcx
+ adcq %rax, %r10
+ movq (%rsi), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rbx
+ mulxq %rbx, %rax, %rdx
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ addq %r11, %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rdi, %rdx
+ mulxq %rbx, %r9, %rax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ adcq %r15, %r9
+ movq 16(%rsi), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ mulxq %rbx, %r15, %rax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ adcq %r8, %r15
+ movq 24(%rsi), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ mulxq %rbx, %r8, %rbp
+ adcq %r13, %r8
+ movq %rbx, %rdx
+ mulxq %rbx, %r13, %r14
+ adcq %r12, %r13
+ movq 40(%rsi), %rax
+ movq %rax, %rdx
+ mulxq %rbx, %rdx, %rdi
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rdi, -56(%rsp) # 8-byte Spill
+ adcq %rdx, %rcx
+ movq 48(%rsi), %rdx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ mulxq %rbx, %r11, %rdx
+ adcq %r10, %r11
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -80(%rsp), %r9 # 8-byte Folded Reload
+ adcq -88(%rsp), %r15 # 8-byte Folded Reload
+ adcq -96(%rsp), %r8 # 8-byte Folded Reload
+ adcq %rbp, %r13
+ adcq %r14, %rcx
+ adcq %rdi, %r11
+ adcq %rdx, %r12
+ movq -32(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r14, %rdi
+ addq %r9, %r14
+ movq -24(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %rbx, %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq %r15, %rbx
+ movq -40(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %rbp, %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ adcq %r8, %rbp
+ movq -48(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %r10, %r15
+ adcq %r13, %r10
+ adcq -72(%rsp), %rcx # 8-byte Folded Reload
+ movq %rax, %rdx
+ mulxq %rax, %r9, %r13
+ adcq %r11, %r9
+ movq -64(%rsp), %rdx # 8-byte Reload
+ mulxq %rax, %rax, %r11
+ adcq %r12, %rax
+ sbbq %r8, %r8
+ andl $1, %r8d
+ addq %rdi, %rbx
+ adcq -24(%rsp), %rbp # 8-byte Folded Reload
+ adcq -32(%rsp), %r10 # 8-byte Folded Reload
+ adcq %r15, %rcx
+ movq -8(%rsp), %rdi # 8-byte Reload
+ movq -16(%rsp), %rdx # 8-byte Reload
+ movq %rdx, 32(%rdi)
+ movq %r14, 40(%rdi)
+ adcq -56(%rsp), %r9 # 8-byte Folded Reload
+ adcq %r13, %rax
+ adcq %r11, %r8
+ movq 48(%rsi), %rdx
+ mulxq (%rsi), %r12, %r11
+ addq %rbx, %r12
+ mulxq 8(%rsi), %rbx, %r14
+ adcq %rbp, %rbx
+ mulxq 16(%rsi), %rbp, %r15
+ adcq %r10, %rbp
+ mulxq 24(%rsi), %rdi, %r10
+ adcq %rcx, %rdi
+ mulxq 32(%rsi), %rcx, %r13
+ adcq %r9, %rcx
+ mulxq 40(%rsi), %rsi, %r9
+ adcq %rax, %rsi
+ mulxq %rdx, %rdx, %rax
+ adcq %r8, %rdx
+ sbbq %r8, %r8
+ andl $1, %r8d
+ addq %r11, %rbx
+ adcq %r14, %rbp
+ movq -8(%rsp), %r11 # 8-byte Reload
+ movq %r12, 48(%r11)
+ movq %rbx, 56(%r11)
+ movq %rbp, 64(%r11)
+ adcq %r15, %rdi
+ movq %rdi, 72(%r11)
+ adcq %r10, %rcx
+ movq %rcx, 80(%r11)
+ adcq %r13, %rsi
+ movq %rsi, 88(%r11)
+ adcq %r9, %rdx
+ movq %rdx, 96(%r11)
+ adcq %rax, %r8
+ movq %r8, 104(%r11)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end97:
+ .size mcl_fpDbl_sqrPre7Lbmi2, .Lfunc_end97-mcl_fpDbl_sqrPre7Lbmi2
+
+ .globl mcl_fp_mont7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont7Lbmi2,@function
+mcl_fp_mont7Lbmi2: # @mcl_fp_mont7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $56, %rsp
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rdi, -120(%rsp) # 8-byte Spill
+ movq 48(%rsi), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ movq (%rdx), %rax
+ movq %rdi, %rdx
+ mulxq %rax, %rdx, %r8
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ movq 40(%rsi), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ mulxq %rax, %rdx, %r9
+ movq %rdx, 40(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rdx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq 24(%rsi), %r11
+ movq %r11, -64(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r10
+ movq %r10, -56(%rsp) # 8-byte Spill
+ movq (%rsi), %r15
+ movq %r15, -40(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rsi
+ movq %rsi, -48(%rsp) # 8-byte Spill
+ mulxq %rax, %r13, %rdi
+ movq %r11, %rdx
+ mulxq %rax, %r14, %rbp
+ movq %r10, %rdx
+ mulxq %rax, %r12, %rbx
+ movq %rsi, %rdx
+ mulxq %rax, %r10, %rsi
+ movq %r15, %rdx
+ mulxq %rax, %r15, %r11
+ addq %r10, %r11
+ adcq %r12, %rsi
+ movq %rsi, -112(%rsp) # 8-byte Spill
+ adcq %r14, %rbx
+ movq %rbx, -104(%rsp) # 8-byte Spill
+ adcq %r13, %rbp
+ movq %rbp, -96(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, -80(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, %r13
+ movq -8(%rcx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq %r15, %rdx
+ imulq %rax, %rdx
+ movq (%rcx), %rdi
+ movq %rdi, 24(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ mulxq %rax, %rbx, %r9
+ movq 16(%rcx), %rsi
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ mulxq %rsi, %r14, %rbp
+ movq 8(%rcx), %rax
+ movq %rax, 32(%rsp) # 8-byte Spill
+ mulxq %rax, %rsi, %rax
+ mulxq %rdi, %r8, %r12
+ addq %rsi, %r12
+ adcq %r14, %rax
+ movq %rax, %rsi
+ movq 24(%rcx), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ mulxq %rax, %r10, %r14
+ adcq %rbp, %r10
+ adcq %rbx, %r14
+ movq 40(%rcx), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ mulxq %rax, %rbp, %rdi
+ adcq %r9, %rbp
+ movq 48(%rcx), %rax
+ movq %rax, (%rsp) # 8-byte Spill
+ mulxq %rax, %rax, %rbx
+ adcq %rdi, %rax
+ adcq $0, %rbx
+ addq %r15, %r8
+ adcq %r11, %r12
+ adcq -112(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -112(%rsp) # 8-byte Spill
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ adcq -96(%rsp), %r14 # 8-byte Folded Reload
+ adcq -88(%rsp), %rbp # 8-byte Folded Reload
+ adcq -80(%rsp), %rax # 8-byte Folded Reload
+ adcq %r13, %rbx
+ sbbq %r11, %r11
+ andl $1, %r11d
+ movq -16(%rsp), %rcx # 8-byte Reload
+ movq 8(%rcx), %rdx
+ mulxq -24(%rsp), %rdi, %rcx # 8-byte Folded Reload
+ movq %rdi, -96(%rsp) # 8-byte Spill
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -128(%rsp) # 8-byte Spill
+ movq %rcx, -88(%rsp) # 8-byte Spill
+ mulxq -48(%rsp), %r9, %r8 # 8-byte Folded Reload
+ mulxq -40(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -104(%rsp) # 8-byte Spill
+ addq %r9, %rcx
+ movq %rcx, %rdi
+ mulxq -56(%rsp), %rcx, %r9 # 8-byte Folded Reload
+ adcq %r8, %rcx
+ movq %rcx, %rsi
+ mulxq -64(%rsp), %r13, %rcx # 8-byte Folded Reload
+ adcq %r9, %r13
+ mulxq -72(%rsp), %r8, %r15 # 8-byte Folded Reload
+ adcq %rcx, %r8
+ adcq -128(%rsp), %r15 # 8-byte Folded Reload
+ movq -88(%rsp), %rdx # 8-byte Reload
+ adcq -96(%rsp), %rdx # 8-byte Folded Reload
+ movq -80(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ movq -104(%rsp), %r9 # 8-byte Reload
+ addq %r12, %r9
+ movq %r9, -104(%rsp) # 8-byte Spill
+ adcq -112(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, %r12
+ adcq %r10, %rsi
+ movq %rsi, -128(%rsp) # 8-byte Spill
+ adcq %r14, %r13
+ adcq %rbp, %r8
+ adcq %rax, %r15
+ adcq %rbx, %rdx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ adcq %r11, %rcx
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %r9, %rdx
+ imulq -8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq (%rsp), %r10, %rax # 8-byte Folded Reload
+ movq %rax, -96(%rsp) # 8-byte Spill
+ mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload
+ mulxq 32(%rsp), %rdi, %rbx # 8-byte Folded Reload
+ mulxq 24(%rsp), %r14, %r9 # 8-byte Folded Reload
+ addq %rdi, %r9
+ mulxq 40(%rsp), %rbp, %r11 # 8-byte Folded Reload
+ adcq %rbx, %rbp
+ adcq %rcx, %r11
+ mulxq 48(%rsp), %rbx, %rsi # 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq 8(%rsp), %rax, %rcx # 8-byte Folded Reload
+ adcq %rsi, %rax
+ adcq %r10, %rcx
+ movq -96(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq -104(%rsp), %r14 # 8-byte Folded Reload
+ adcq %r12, %r9
+ adcq -128(%rsp), %rbp # 8-byte Folded Reload
+ adcq %r13, %r11
+ adcq %r8, %rbx
+ adcq %r15, %rax
+ adcq -88(%rsp), %rcx # 8-byte Folded Reload
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ adcq $0, -112(%rsp) # 8-byte Folded Spill
+ movq -16(%rsp), %rdx # 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq -24(%rsp), %rdi, %rsi # 8-byte Folded Reload
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rdi, %rsi # 8-byte Folded Reload
+ movq %rdi, -128(%rsp) # 8-byte Spill
+ movq %rsi, -88(%rsp) # 8-byte Spill
+ mulxq -56(%rsp), %rdi, %r10 # 8-byte Folded Reload
+ mulxq -48(%rsp), %rsi, %r13 # 8-byte Folded Reload
+ mulxq -40(%rsp), %r8, %r15 # 8-byte Folded Reload
+ addq %rsi, %r15
+ adcq %rdi, %r13
+ mulxq -64(%rsp), %r12, %rsi # 8-byte Folded Reload
+ adcq %r10, %r12
+ mulxq -72(%rsp), %r10, %r14 # 8-byte Folded Reload
+ adcq %rsi, %r10
+ adcq -128(%rsp), %r14 # 8-byte Folded Reload
+ movq -88(%rsp), %rsi # 8-byte Reload
+ adcq -104(%rsp), %rsi # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r9, %r8
+ movq %r8, -104(%rsp) # 8-byte Spill
+ adcq %rbp, %r15
+ adcq %r11, %r13
+ adcq %rbx, %r12
+ adcq %rax, %r10
+ adcq %rcx, %r14
+ adcq -96(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -88(%rsp) # 8-byte Spill
+ adcq -112(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, %rbx
+ movq %r8, %rdx
+ imulq -8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rcx, -112(%rsp) # 8-byte Spill
+ movq %rax, -96(%rsp) # 8-byte Spill
+ mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload
+ mulxq 32(%rsp), %rbp, %rsi # 8-byte Folded Reload
+ mulxq 24(%rsp), %r11, %r8 # 8-byte Folded Reload
+ addq %rbp, %r8
+ mulxq 40(%rsp), %rbp, %r9 # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ adcq %rcx, %r9
+ mulxq 48(%rsp), %rsi, %rdi # 8-byte Folded Reload
+ adcq %rax, %rsi
+ mulxq 8(%rsp), %rax, %rcx # 8-byte Folded Reload
+ adcq %rdi, %rax
+ adcq -112(%rsp), %rcx # 8-byte Folded Reload
+ movq -96(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq -104(%rsp), %r11 # 8-byte Folded Reload
+ adcq %r15, %r8
+ adcq %r13, %rbp
+ adcq %r12, %r9
+ adcq %r10, %rsi
+ adcq %r14, %rax
+ adcq -88(%rsp), %rcx # 8-byte Folded Reload
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ adcq $0, %rbx
+ movq %rbx, -88(%rsp) # 8-byte Spill
+ movq -16(%rsp), %rdx # 8-byte Reload
+ movq 24(%rdx), %rdx
+ mulxq -24(%rsp), %rbx, %rdi # 8-byte Folded Reload
+ movq %rbx, -112(%rsp) # 8-byte Spill
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rdi, %r13 # 8-byte Folded Reload
+ movq %rdi, -128(%rsp) # 8-byte Spill
+ mulxq -56(%rsp), %r10, %r11 # 8-byte Folded Reload
+ mulxq -48(%rsp), %rdi, %r15 # 8-byte Folded Reload
+ mulxq -40(%rsp), %rbx, %r12 # 8-byte Folded Reload
+ movq %rbx, -104(%rsp) # 8-byte Spill
+ addq %rdi, %r12
+ adcq %r10, %r15
+ mulxq -64(%rsp), %rbx, %rdi # 8-byte Folded Reload
+ adcq %r11, %rbx
+ mulxq -72(%rsp), %r10, %r14 # 8-byte Folded Reload
+ adcq %rdi, %r10
+ adcq -128(%rsp), %r14 # 8-byte Folded Reload
+ adcq -112(%rsp), %r13 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ movq -104(%rsp), %rdi # 8-byte Reload
+ addq %r8, %rdi
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ adcq %rbp, %r12
+ adcq %r9, %r15
+ adcq %rsi, %rbx
+ adcq %rax, %r10
+ adcq %rcx, %r14
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ adcq -88(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rdi, %rdx
+ imulq -8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rcx, -112(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload
+ mulxq 32(%rsp), %rbp, %rsi # 8-byte Folded Reload
+ mulxq 24(%rsp), %r11, %r8 # 8-byte Folded Reload
+ addq %rbp, %r8
+ mulxq 40(%rsp), %rbp, %r9 # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ adcq %rcx, %r9
+ mulxq 48(%rsp), %rsi, %rdi # 8-byte Folded Reload
+ adcq %rax, %rsi
+ mulxq 8(%rsp), %rax, %rcx # 8-byte Folded Reload
+ adcq %rdi, %rax
+ adcq -112(%rsp), %rcx # 8-byte Folded Reload
+ movq -88(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq -104(%rsp), %r11 # 8-byte Folded Reload
+ adcq %r12, %r8
+ adcq %r15, %rbp
+ adcq %rbx, %r9
+ adcq %r10, %rsi
+ adcq %r14, %rax
+ adcq %r13, %rcx
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ adcq $0, -96(%rsp) # 8-byte Folded Spill
+ movq -16(%rsp), %rdx # 8-byte Reload
+ movq 32(%rdx), %rdx
+ mulxq -24(%rsp), %rbx, %rdi # 8-byte Folded Reload
+ movq %rbx, -104(%rsp) # 8-byte Spill
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rdi, %r11 # 8-byte Folded Reload
+ movq %rdi, -112(%rsp) # 8-byte Spill
+ mulxq -56(%rsp), %r10, %r13 # 8-byte Folded Reload
+ mulxq -48(%rsp), %rdi, %r15 # 8-byte Folded Reload
+ mulxq -40(%rsp), %rbx, %r12 # 8-byte Folded Reload
+ addq %rdi, %r12
+ adcq %r10, %r15
+ mulxq -64(%rsp), %r10, %rdi # 8-byte Folded Reload
+ adcq %r13, %r10
+ mulxq -72(%rsp), %r13, %r14 # 8-byte Folded Reload
+ adcq %rdi, %r13
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ adcq -104(%rsp), %r11 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r8, %rbx
+ movq %rbx, -112(%rsp) # 8-byte Spill
+ adcq %rbp, %r12
+ adcq %r9, %r15
+ adcq %rsi, %r10
+ adcq %rax, %r13
+ adcq %rcx, %r14
+ adcq -88(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, -128(%rsp) # 8-byte Spill
+ adcq -96(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbx, %rdx
+ imulq -8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rcx, -88(%rsp) # 8-byte Spill
+ movq %rax, -96(%rsp) # 8-byte Spill
+ mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload
+ mulxq 32(%rsp), %rbp, %rsi # 8-byte Folded Reload
+ mulxq 24(%rsp), %r9, %r11 # 8-byte Folded Reload
+ addq %rbp, %r11
+ mulxq 40(%rsp), %rbp, %r8 # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ adcq %rcx, %r8
+ mulxq 48(%rsp), %rsi, %rdi # 8-byte Folded Reload
+ adcq %rax, %rsi
+ mulxq 8(%rsp), %rax, %rcx # 8-byte Folded Reload
+ adcq %rdi, %rax
+ adcq -88(%rsp), %rcx # 8-byte Folded Reload
+ movq -96(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq -112(%rsp), %r9 # 8-byte Folded Reload
+ adcq %r12, %r11
+ adcq %r15, %rbp
+ adcq %r10, %r8
+ adcq %r13, %rsi
+ adcq %r14, %rax
+ adcq -128(%rsp), %rcx # 8-byte Folded Reload
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ adcq $0, -104(%rsp) # 8-byte Folded Spill
+ movq -16(%rsp), %rdx # 8-byte Reload
+ movq 40(%rdx), %rdx
+ mulxq -24(%rsp), %rbx, %rdi # 8-byte Folded Reload
+ movq %rbx, -112(%rsp) # 8-byte Spill
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rbx, %rdi # 8-byte Folded Reload
+ movq %rbx, -128(%rsp) # 8-byte Spill
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ mulxq -56(%rsp), %rbx, %r10 # 8-byte Folded Reload
+ mulxq -48(%rsp), %rdi, %r13 # 8-byte Folded Reload
+ mulxq -40(%rsp), %r9, %r12 # 8-byte Folded Reload
+ addq %rdi, %r12
+ adcq %rbx, %r13
+ mulxq -64(%rsp), %r15, %rdi # 8-byte Folded Reload
+ adcq %r10, %r15
+ mulxq -72(%rsp), %r10, %r14 # 8-byte Folded Reload
+ adcq %rdi, %r10
+ adcq -128(%rsp), %r14 # 8-byte Folded Reload
+ movq -88(%rsp), %rdi # 8-byte Reload
+ adcq -112(%rsp), %rdi # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r11, %r9
+ movq %r9, -112(%rsp) # 8-byte Spill
+ adcq %rbp, %r12
+ adcq %r8, %r13
+ adcq %rsi, %r15
+ adcq %rax, %r10
+ adcq %rcx, %r14
+ adcq -96(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ adcq -104(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %r9, %rdx
+ imulq -8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rcx, -128(%rsp) # 8-byte Spill
+ movq %rax, -96(%rsp) # 8-byte Spill
+ mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload
+ mulxq 32(%rsp), %rdi, %rsi # 8-byte Folded Reload
+ mulxq 24(%rsp), %r11, %rbx # 8-byte Folded Reload
+ addq %rdi, %rbx
+ mulxq 40(%rsp), %r8, %r9 # 8-byte Folded Reload
+ adcq %rsi, %r8
+ adcq %rcx, %r9
+ mulxq 48(%rsp), %rdi, %rbp # 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq 8(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ adcq %rbp, %rcx
+ adcq -128(%rsp), %rsi # 8-byte Folded Reload
+ movq -96(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq -112(%rsp), %r11 # 8-byte Folded Reload
+ adcq %r12, %rbx
+ adcq %r13, %r8
+ adcq %r15, %r9
+ adcq %r10, %rdi
+ adcq %r14, %rcx
+ adcq -88(%rsp), %rsi # 8-byte Folded Reload
+ adcq -80(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq -104(%rsp), %r12 # 8-byte Reload
+ adcq $0, %r12
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ mulxq -24(%rsp), %rbp, %rax # 8-byte Folded Reload
+ movq %rbp, -80(%rsp) # 8-byte Spill
+ movq %rax, -16(%rsp) # 8-byte Spill
+ mulxq -32(%rsp), %rbp, %rax # 8-byte Folded Reload
+ movq %rbp, -88(%rsp) # 8-byte Spill
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulxq -72(%rsp), %rbp, %rax # 8-byte Folded Reload
+ movq %rbp, -72(%rsp) # 8-byte Spill
+ movq %rax, -32(%rsp) # 8-byte Spill
+ mulxq -64(%rsp), %r13, %rbp # 8-byte Folded Reload
+ mulxq -56(%rsp), %r14, %r15 # 8-byte Folded Reload
+ mulxq -48(%rsp), %rax, %r11 # 8-byte Folded Reload
+ mulxq -40(%rsp), %rdx, %r10 # 8-byte Folded Reload
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ addq %rax, %r10
+ adcq %r14, %r11
+ adcq %r13, %r15
+ adcq -72(%rsp), %rbp # 8-byte Folded Reload
+ movq -32(%rsp), %r14 # 8-byte Reload
+ adcq -88(%rsp), %r14 # 8-byte Folded Reload
+ movq -24(%rsp), %rdx # 8-byte Reload
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq -16(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ movq -40(%rsp), %r13 # 8-byte Reload
+ addq %rbx, %r13
+ movq %r13, -40(%rsp) # 8-byte Spill
+ adcq %r8, %r10
+ adcq %r9, %r11
+ adcq %rdi, %r15
+ adcq %rcx, %rbp
+ movq %rbp, -48(%rsp) # 8-byte Spill
+ adcq %rsi, %r14
+ movq %r14, -32(%rsp) # 8-byte Spill
+ adcq -96(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq %r12, %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ sbbq %rdi, %rdi
+ movq -8(%rsp), %rdx # 8-byte Reload
+ imulq %r13, %rdx
+ mulxq 16(%rsp), %rbp, %rsi # 8-byte Folded Reload
+ mulxq 32(%rsp), %rcx, %rbx # 8-byte Folded Reload
+ mulxq 24(%rsp), %r13, %rax # 8-byte Folded Reload
+ addq %rcx, %rax
+ mulxq 40(%rsp), %rcx, %r9 # 8-byte Folded Reload
+ adcq %rbx, %rcx
+ adcq %rbp, %r9
+ mulxq 48(%rsp), %rbp, %rbx # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ mulxq 8(%rsp), %rsi, %r14 # 8-byte Folded Reload
+ adcq %rbx, %rsi
+ mulxq (%rsp), %rdx, %rbx # 8-byte Folded Reload
+ adcq %r14, %rdx
+ adcq $0, %rbx
+ andl $1, %edi
+ addq -40(%rsp), %r13 # 8-byte Folded Reload
+ adcq %r10, %rax
+ adcq %r11, %rcx
+ adcq %r15, %r9
+ adcq -48(%rsp), %rbp # 8-byte Folded Reload
+ adcq -32(%rsp), %rsi # 8-byte Folded Reload
+ adcq -24(%rsp), %rdx # 8-byte Folded Reload
+ adcq -16(%rsp), %rbx # 8-byte Folded Reload
+ adcq $0, %rdi
+ movq %rax, %r8
+ subq 24(%rsp), %r8 # 8-byte Folded Reload
+ movq %rcx, %r10
+ sbbq 32(%rsp), %r10 # 8-byte Folded Reload
+ movq %r9, %r11
+ sbbq 40(%rsp), %r11 # 8-byte Folded Reload
+ movq %rbp, %r14
+ sbbq 16(%rsp), %r14 # 8-byte Folded Reload
+ movq %rsi, %r15
+ sbbq 48(%rsp), %r15 # 8-byte Folded Reload
+ movq %rdx, %r12
+ sbbq 8(%rsp), %r12 # 8-byte Folded Reload
+ movq %rbx, %r13
+ sbbq (%rsp), %r13 # 8-byte Folded Reload
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %rbx, %r13
+ testb %dil, %dil
+ cmovneq %rax, %r8
+ movq -120(%rsp), %rax # 8-byte Reload
+ movq %r8, (%rax)
+ cmovneq %rcx, %r10
+ movq %r10, 8(%rax)
+ cmovneq %r9, %r11
+ movq %r11, 16(%rax)
+ cmovneq %rbp, %r14
+ movq %r14, 24(%rax)
+ cmovneq %rsi, %r15
+ movq %r15, 32(%rax)
+ cmovneq %rdx, %r12
+ movq %r12, 40(%rax)
+ movq %r13, 48(%rax)
+ addq $56, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end98:
+ .size mcl_fp_mont7Lbmi2, .Lfunc_end98-mcl_fp_mont7Lbmi2
+
+ .globl mcl_fp_montNF7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF7Lbmi2,@function
+mcl_fp_montNF7Lbmi2: # @mcl_fp_montNF7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $40, %rsp
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq (%rsi), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rdi
+ movq %rdi, -48(%rsp) # 8-byte Spill
+ movq (%rdx), %rbp
+ movq %rdi, %rdx
+ mulxq %rbp, %rdi, %rbx
+ movq %rax, %rdx
+ mulxq %rbp, %r8, %r14
+ movq 16(%rsi), %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ addq %rdi, %r14
+ mulxq %rbp, %r15, %rax
+ adcq %rbx, %r15
+ movq 24(%rsi), %rdx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ mulxq %rbp, %rbx, %rdi
+ adcq %rax, %rbx
+ movq 32(%rsi), %rdx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ mulxq %rbp, %r11, %rax
+ adcq %rdi, %r11
+ movq 40(%rsi), %rdx
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ mulxq %rbp, %r9, %rdi
+ adcq %rax, %r9
+ movq 48(%rsi), %rdx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ mulxq %rbp, %r10, %rbp
+ adcq %rdi, %r10
+ adcq $0, %rbp
+ movq -8(%rcx), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq %r8, %rdx
+ imulq %rax, %rdx
+ movq (%rcx), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ mulxq %rax, %rax, %rsi
+ movq %rsi, -96(%rsp) # 8-byte Spill
+ addq %r8, %rax
+ movq 8(%rcx), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ mulxq %rax, %r8, %rsi
+ movq %rsi, -112(%rsp) # 8-byte Spill
+ adcq %r14, %r8
+ movq 16(%rcx), %rax
+ movq %rax, (%rsp) # 8-byte Spill
+ mulxq %rax, %rsi, %r13
+ adcq %r15, %rsi
+ movq 24(%rcx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ mulxq %rax, %r12, %rax
+ adcq %rbx, %r12
+ movq 32(%rcx), %rdi
+ movq %rdi, -16(%rsp) # 8-byte Spill
+ mulxq %rdi, %r15, %rbx
+ adcq %r11, %r15
+ movq 40(%rcx), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ mulxq %rdi, %r14, %rdi
+ adcq %r9, %r14
+ movq 48(%rcx), %rcx
+ movq %rcx, 32(%rsp) # 8-byte Spill
+ mulxq %rcx, %r11, %rcx
+ adcq %r10, %r11
+ adcq $0, %rbp
+ addq -96(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, -96(%rsp) # 8-byte Spill
+ adcq -112(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -112(%rsp) # 8-byte Spill
+ adcq %r13, %r12
+ adcq %rax, %r15
+ adcq %rbx, %r14
+ adcq %rdi, %r11
+ adcq %rcx, %rbp
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ mulxq -48(%rsp), %rcx, %rsi # 8-byte Folded Reload
+ mulxq -32(%rsp), %r13, %rax # 8-byte Folded Reload
+ addq %rcx, %rax
+ mulxq -56(%rsp), %rcx, %rdi # 8-byte Folded Reload
+ adcq %rsi, %rcx
+ mulxq -64(%rsp), %rsi, %r8 # 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -72(%rsp), %rdi, %r9 # 8-byte Folded Reload
+ adcq %r8, %rdi
+ mulxq -80(%rsp), %r8, %rbx # 8-byte Folded Reload
+ adcq %r9, %r8
+ mulxq -88(%rsp), %r9, %r10 # 8-byte Folded Reload
+ adcq %rbx, %r9
+ adcq $0, %r10
+ addq -96(%rsp), %r13 # 8-byte Folded Reload
+ adcq -112(%rsp), %rax # 8-byte Folded Reload
+ adcq %r12, %rcx
+ adcq %r15, %rsi
+ adcq %r14, %rdi
+ adcq %r11, %r8
+ adcq %rbp, %r9
+ adcq $0, %r10
+ movq %r13, %rdx
+ imulq 8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 24(%rsp), %rbp, %rbx # 8-byte Folded Reload
+ movq %rbx, -96(%rsp) # 8-byte Spill
+ addq %r13, %rbp
+ mulxq 16(%rsp), %rbp, %r14 # 8-byte Folded Reload
+ adcq %rax, %rbp
+ mulxq (%rsp), %rax, %r11 # 8-byte Folded Reload
+ adcq %rcx, %rax
+ mulxq -8(%rsp), %r12, %rcx # 8-byte Folded Reload
+ adcq %rsi, %r12
+ mulxq -16(%rsp), %r15, %rbx # 8-byte Folded Reload
+ adcq %rdi, %r15
+ mulxq -24(%rsp), %r13, %rdi # 8-byte Folded Reload
+ adcq %r8, %r13
+ mulxq 32(%rsp), %rsi, %rdx # 8-byte Folded Reload
+ adcq %r9, %rsi
+ adcq $0, %r10
+ addq -96(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, -96(%rsp) # 8-byte Spill
+ adcq %r14, %rax
+ movq %rax, -112(%rsp) # 8-byte Spill
+ adcq %r11, %r12
+ adcq %rcx, %r15
+ adcq %rbx, %r13
+ adcq %rdi, %rsi
+ adcq %rdx, %r10
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ mulxq -48(%rsp), %rcx, %rax # 8-byte Folded Reload
+ mulxq -32(%rsp), %r14, %rdi # 8-byte Folded Reload
+ addq %rcx, %rdi
+ mulxq -56(%rsp), %rbp, %rcx # 8-byte Folded Reload
+ adcq %rax, %rbp
+ mulxq -64(%rsp), %rbx, %r8 # 8-byte Folded Reload
+ adcq %rcx, %rbx
+ mulxq -72(%rsp), %rax, %r9 # 8-byte Folded Reload
+ adcq %r8, %rax
+ mulxq -80(%rsp), %r8, %rcx # 8-byte Folded Reload
+ movq %rcx, -120(%rsp) # 8-byte Spill
+ adcq %r9, %r8
+ mulxq -88(%rsp), %r9, %r11 # 8-byte Folded Reload
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r11
+ addq -96(%rsp), %r14 # 8-byte Folded Reload
+ adcq -112(%rsp), %rdi # 8-byte Folded Reload
+ adcq %r12, %rbp
+ adcq %r15, %rbx
+ adcq %r13, %rax
+ adcq %rsi, %r8
+ adcq %r10, %r9
+ adcq $0, %r11
+ movq %r14, %rdx
+ imulq 8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 24(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rcx, -96(%rsp) # 8-byte Spill
+ addq %r14, %rsi
+ mulxq 16(%rsp), %rsi, %r13 # 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq (%rsp), %rdi, %r15 # 8-byte Folded Reload
+ adcq %rbp, %rdi
+ mulxq -8(%rsp), %rcx, %rbp # 8-byte Folded Reload
+ adcq %rbx, %rcx
+ mulxq -16(%rsp), %r14, %rbx # 8-byte Folded Reload
+ adcq %rax, %r14
+ mulxq -24(%rsp), %r12, %rax # 8-byte Folded Reload
+ adcq %r8, %r12
+ mulxq 32(%rsp), %r10, %rdx # 8-byte Folded Reload
+ adcq %r9, %r10
+ adcq $0, %r11
+ addq -96(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -96(%rsp) # 8-byte Spill
+ adcq %r13, %rdi
+ movq %rdi, -112(%rsp) # 8-byte Spill
+ adcq %r15, %rcx
+ adcq %rbp, %r14
+ adcq %rbx, %r12
+ adcq %rax, %r10
+ adcq %rdx, %r11
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ mulxq -48(%rsp), %rsi, %rax # 8-byte Folded Reload
+ mulxq -32(%rsp), %r15, %rbp # 8-byte Folded Reload
+ addq %rsi, %rbp
+ mulxq -56(%rsp), %rbx, %rdi # 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -64(%rsp), %rsi, %rax # 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -72(%rsp), %rdi, %r9 # 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq -80(%rsp), %r8, %rax # 8-byte Folded Reload
+ adcq %r9, %r8
+ mulxq -88(%rsp), %r9, %r13 # 8-byte Folded Reload
+ adcq %rax, %r9
+ adcq $0, %r13
+ addq -96(%rsp), %r15 # 8-byte Folded Reload
+ adcq -112(%rsp), %rbp # 8-byte Folded Reload
+ adcq %rcx, %rbx
+ adcq %r14, %rsi
+ adcq %r12, %rdi
+ adcq %r10, %r8
+ adcq %r11, %r9
+ adcq $0, %r13
+ movq %r15, %rdx
+ imulq 8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 24(%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rax, -96(%rsp) # 8-byte Spill
+ addq %r15, %rcx
+ mulxq 16(%rsp), %rcx, %r11 # 8-byte Folded Reload
+ adcq %rbp, %rcx
+ mulxq (%rsp), %rbp, %r10 # 8-byte Folded Reload
+ adcq %rbx, %rbp
+ mulxq -8(%rsp), %rax, %rbx # 8-byte Folded Reload
+ adcq %rsi, %rax
+ mulxq -16(%rsp), %r14, %rsi # 8-byte Folded Reload
+ adcq %rdi, %r14
+ mulxq -24(%rsp), %r15, %rdi # 8-byte Folded Reload
+ adcq %r8, %r15
+ mulxq 32(%rsp), %r12, %rdx # 8-byte Folded Reload
+ adcq %r9, %r12
+ adcq $0, %r13
+ addq -96(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r11, %rbp
+ movq %rbp, -96(%rsp) # 8-byte Spill
+ adcq %r10, %rax
+ movq %rax, -112(%rsp) # 8-byte Spill
+ adcq %rbx, %r14
+ adcq %rsi, %r15
+ adcq %rdi, %r12
+ adcq %rdx, %r13
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ mulxq -48(%rsp), %rsi, %rdi # 8-byte Folded Reload
+ mulxq -32(%rsp), %r11, %r8 # 8-byte Folded Reload
+ addq %rsi, %r8
+ mulxq -56(%rsp), %rbx, %rsi # 8-byte Folded Reload
+ adcq %rdi, %rbx
+ mulxq -64(%rsp), %rbp, %rdi # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ mulxq -72(%rsp), %rsi, %r9 # 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -80(%rsp), %rdi, %rax # 8-byte Folded Reload
+ adcq %r9, %rdi
+ mulxq -88(%rsp), %r9, %r10 # 8-byte Folded Reload
+ adcq %rax, %r9
+ adcq $0, %r10
+ addq %rcx, %r11
+ adcq -96(%rsp), %r8 # 8-byte Folded Reload
+ adcq -112(%rsp), %rbx # 8-byte Folded Reload
+ adcq %r14, %rbp
+ adcq %r15, %rsi
+ adcq %r12, %rdi
+ adcq %r13, %r9
+ adcq $0, %r10
+ movq %r11, %rdx
+ imulq 8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 24(%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rax, -96(%rsp) # 8-byte Spill
+ addq %r11, %rcx
+ mulxq 16(%rsp), %rcx, %r13 # 8-byte Folded Reload
+ adcq %r8, %rcx
+ mulxq (%rsp), %rax, %r8 # 8-byte Folded Reload
+ adcq %rbx, %rax
+ mulxq -8(%rsp), %rbx, %r11 # 8-byte Folded Reload
+ adcq %rbp, %rbx
+ mulxq -16(%rsp), %r14, %rbp # 8-byte Folded Reload
+ adcq %rsi, %r14
+ mulxq -24(%rsp), %r15, %rsi # 8-byte Folded Reload
+ adcq %rdi, %r15
+ mulxq 32(%rsp), %r12, %rdx # 8-byte Folded Reload
+ adcq %r9, %r12
+ adcq $0, %r10
+ addq -96(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r13, %rax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ adcq %r8, %rbx
+ movq %rbx, -112(%rsp) # 8-byte Spill
+ adcq %r11, %r14
+ adcq %rbp, %r15
+ adcq %rsi, %r12
+ adcq %rdx, %r10
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ mulxq -48(%rsp), %rsi, %rax # 8-byte Folded Reload
+ mulxq -32(%rsp), %r11, %rbp # 8-byte Folded Reload
+ addq %rsi, %rbp
+ mulxq -56(%rsp), %rbx, %rdi # 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -64(%rsp), %rsi, %rax # 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -72(%rsp), %rdi, %r9 # 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq -80(%rsp), %r8, %rax # 8-byte Folded Reload
+ adcq %r9, %r8
+ mulxq -88(%rsp), %r9, %r13 # 8-byte Folded Reload
+ adcq %rax, %r9
+ adcq $0, %r13
+ addq %rcx, %r11
+ adcq -96(%rsp), %rbp # 8-byte Folded Reload
+ adcq -112(%rsp), %rbx # 8-byte Folded Reload
+ adcq %r14, %rsi
+ adcq %r15, %rdi
+ adcq %r12, %r8
+ adcq %r10, %r9
+ adcq $0, %r13
+ movq %r11, %rdx
+ imulq 8(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 24(%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rax, -112(%rsp) # 8-byte Spill
+ addq %r11, %rcx
+ mulxq 16(%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rax, -120(%rsp) # 8-byte Spill
+ adcq %rbp, %rcx
+ mulxq (%rsp), %rax, %rbp # 8-byte Folded Reload
+ movq %rbp, -128(%rsp) # 8-byte Spill
+ adcq %rbx, %rax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ mulxq -8(%rsp), %r14, %rbp # 8-byte Folded Reload
+ adcq %rsi, %r14
+ mulxq -16(%rsp), %r11, %r12 # 8-byte Folded Reload
+ adcq %rdi, %r11
+ mulxq -24(%rsp), %r10, %rbx # 8-byte Folded Reload
+ adcq %r8, %r10
+ mulxq 32(%rsp), %rdi, %rax # 8-byte Folded Reload
+ adcq %r9, %rdi
+ adcq $0, %r13
+ addq -112(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -112(%rsp) # 8-byte Spill
+ movq -96(%rsp), %rcx # 8-byte Reload
+ adcq -120(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -96(%rsp) # 8-byte Spill
+ adcq -128(%rsp), %r14 # 8-byte Folded Reload
+ adcq %rbp, %r11
+ adcq %r12, %r10
+ adcq %rbx, %rdi
+ adcq %rax, %r13
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ mulxq -48(%rsp), %rbp, %r9 # 8-byte Folded Reload
+ mulxq -32(%rsp), %r8, %rax # 8-byte Folded Reload
+ addq %rbp, %rax
+ mulxq -56(%rsp), %rbx, %rcx # 8-byte Folded Reload
+ adcq %r9, %rbx
+ mulxq -64(%rsp), %rbp, %r9 # 8-byte Folded Reload
+ adcq %rcx, %rbp
+ mulxq -72(%rsp), %rcx, %r12 # 8-byte Folded Reload
+ adcq %r9, %rcx
+ mulxq -80(%rsp), %r15, %rsi # 8-byte Folded Reload
+ movq %rsi, -32(%rsp) # 8-byte Spill
+ adcq %r12, %r15
+ mulxq -88(%rsp), %r12, %r9 # 8-byte Folded Reload
+ adcq -32(%rsp), %r12 # 8-byte Folded Reload
+ adcq $0, %r9
+ addq -112(%rsp), %r8 # 8-byte Folded Reload
+ adcq -96(%rsp), %rax # 8-byte Folded Reload
+ adcq %r14, %rbx
+ adcq %r11, %rbp
+ adcq %r10, %rcx
+ adcq %rdi, %r15
+ adcq %r13, %r12
+ adcq $0, %r9
+ movq 8(%rsp), %rdx # 8-byte Reload
+ imulq %r8, %rdx
+ mulxq 24(%rsp), %rdi, %rsi # 8-byte Folded Reload
+ movq %rsi, 8(%rsp) # 8-byte Spill
+ addq %r8, %rdi
+ mulxq 16(%rsp), %r8, %rsi # 8-byte Folded Reload
+ movq %rsi, -32(%rsp) # 8-byte Spill
+ adcq %rax, %r8
+ movq (%rsp), %r11 # 8-byte Reload
+ mulxq %r11, %rsi, %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ adcq %rbx, %rsi
+ movq -8(%rsp), %r14 # 8-byte Reload
+ mulxq %r14, %rdi, %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ adcq %rbp, %rdi
+ movq -16(%rsp), %rbp # 8-byte Reload
+ mulxq %rbp, %rax, %rbx
+ movq %rbx, -56(%rsp) # 8-byte Spill
+ adcq %rcx, %rax
+ movq -24(%rsp), %rbx # 8-byte Reload
+ mulxq %rbx, %rcx, %r13
+ adcq %r15, %rcx
+ mulxq 32(%rsp), %rdx, %r15 # 8-byte Folded Reload
+ adcq %r12, %rdx
+ adcq $0, %r9
+ addq 8(%rsp), %r8 # 8-byte Folded Reload
+ adcq -32(%rsp), %rsi # 8-byte Folded Reload
+ adcq -40(%rsp), %rdi # 8-byte Folded Reload
+ adcq -48(%rsp), %rax # 8-byte Folded Reload
+ adcq -56(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r13, %rdx
+ adcq %r15, %r9
+ movq %r8, %r13
+ subq 24(%rsp), %r13 # 8-byte Folded Reload
+ movq %rsi, %r12
+ sbbq 16(%rsp), %r12 # 8-byte Folded Reload
+ movq %rdi, %r10
+ sbbq %r11, %r10
+ movq %rax, %r11
+ sbbq %r14, %r11
+ movq %rcx, %r14
+ sbbq %rbp, %r14
+ movq %rdx, %r15
+ sbbq %rbx, %r15
+ movq %r9, %rbp
+ sbbq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rbx
+ sarq $63, %rbx
+ cmovsq %r8, %r13
+ movq -104(%rsp), %rbx # 8-byte Reload
+ movq %r13, (%rbx)
+ cmovsq %rsi, %r12
+ movq %r12, 8(%rbx)
+ cmovsq %rdi, %r10
+ movq %r10, 16(%rbx)
+ cmovsq %rax, %r11
+ movq %r11, 24(%rbx)
+ cmovsq %rcx, %r14
+ movq %r14, 32(%rbx)
+ cmovsq %rdx, %r15
+ movq %r15, 40(%rbx)
+ cmovsq %r9, %rbp
+ movq %rbp, 48(%rbx)
+ addq $40, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end99:
+ .size mcl_fp_montNF7Lbmi2, .Lfunc_end99-mcl_fp_montNF7Lbmi2
+
+ .globl mcl_fp_montRed7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed7Lbmi2,@function
+mcl_fp_montRed7Lbmi2: # @mcl_fp_montRed7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $64, %rsp
+ movq %rdx, %rcx
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq (%rcx), %rbx
+ movq %rbx, 32(%rsp) # 8-byte Spill
+ movq (%rsi), %rdx
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ imulq %rax, %rdx
+ movq 48(%rcx), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ mulxq %rax, %rdi, %rax
+ movq %rdi, 40(%rsp) # 8-byte Spill
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq 40(%rcx), %r8
+ movq %r8, (%rsp) # 8-byte Spill
+ movq 32(%rcx), %r9
+ movq %r9, 24(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rbp
+ movq %rbp, 8(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rdi
+ movq %rdi, 56(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ mulxq %r8, %r10, %r11
+ mulxq %r9, %r14, %r9
+ mulxq %rbp, %r8, %r13
+ mulxq %rdi, %rcx, %r12
+ mulxq %rax, %rbp, %r15
+ mulxq %rbx, %rdx, %rdi
+ addq %rbp, %rdi
+ adcq %rcx, %r15
+ adcq %r8, %r12
+ adcq %r14, %r13
+ adcq %r10, %r9
+ adcq 40(%rsp), %r11 # 8-byte Folded Reload
+ movq -48(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq 48(%rsp), %rdx # 8-byte Folded Reload
+ adcq 8(%rsi), %rdi
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %r12
+ adcq 32(%rsi), %r13
+ adcq 40(%rsi), %r9
+ movq %r9, -96(%rsp) # 8-byte Spill
+ adcq 48(%rsi), %r11
+ movq %r11, -72(%rsp) # 8-byte Spill
+ adcq 56(%rsi), %rcx
+ movq %rcx, -48(%rsp) # 8-byte Spill
+ movq 104(%rsi), %r8
+ movq 96(%rsi), %rdx
+ movq 88(%rsi), %rbp
+ movq 80(%rsi), %rbx
+ movq 72(%rsi), %rcx
+ movq 64(%rsi), %rsi
+ adcq $0, %rsi
+ movq %rsi, -104(%rsp) # 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ adcq $0, %rbx
+ movq %rbx, -40(%rsp) # 8-byte Spill
+ adcq $0, %rbp
+ movq %rbp, -32(%rsp) # 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 48(%rsp) # 8-byte Spill
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ movq %rcx, 40(%rsp) # 8-byte Spill
+ movq %rdi, %rdx
+ movq -24(%rsp), %r9 # 8-byte Reload
+ imulq %r9, %rdx
+ mulxq 16(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -112(%rsp) # 8-byte Spill
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ mulxq (%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -120(%rsp) # 8-byte Spill
+ movq %rcx, -64(%rsp) # 8-byte Spill
+ movq 24(%rsp), %rbx # 8-byte Reload
+ mulxq %rbx, %rcx, %rbp
+ movq %rcx, -128(%rsp) # 8-byte Spill
+ mulxq 8(%rsp), %r10, %r14 # 8-byte Folded Reload
+ mulxq 56(%rsp), %rsi, %r11 # 8-byte Folded Reload
+ mulxq %rax, %rcx, %r8
+ mulxq 32(%rsp), %rdx, %rax # 8-byte Folded Reload
+ addq %rcx, %rax
+ adcq %rsi, %r8
+ adcq %r10, %r11
+ adcq -128(%rsp), %r14 # 8-byte Folded Reload
+ adcq -120(%rsp), %rbp # 8-byte Folded Reload
+ movq -64(%rsp), %rsi # 8-byte Reload
+ adcq -112(%rsp), %rsi # 8-byte Folded Reload
+ movq -56(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq %rdi, %rdx
+ adcq %r15, %rax
+ adcq %r12, %r8
+ adcq %r13, %r11
+ adcq -96(%rsp), %r14 # 8-byte Folded Reload
+ adcq -72(%rsp), %rbp # 8-byte Folded Reload
+ adcq -48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -64(%rsp) # 8-byte Spill
+ adcq -104(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ adcq $0, -80(%rsp) # 8-byte Folded Spill
+ adcq $0, -40(%rsp) # 8-byte Folded Spill
+ adcq $0, -32(%rsp) # 8-byte Folded Spill
+ adcq $0, -8(%rsp) # 8-byte Folded Spill
+ adcq $0, 48(%rsp) # 8-byte Folded Spill
+ adcq $0, 40(%rsp) # 8-byte Folded Spill
+ movq %rax, %rdx
+ imulq %r9, %rdx
+ mulxq 16(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -96(%rsp) # 8-byte Spill
+ movq %rcx, -48(%rsp) # 8-byte Spill
+ movq (%rsp), %r15 # 8-byte Reload
+ mulxq %r15, %rsi, %rcx
+ movq %rsi, -104(%rsp) # 8-byte Spill
+ movq %rcx, -72(%rsp) # 8-byte Spill
+ mulxq %rbx, %rcx, %r13
+ movq %rcx, -112(%rsp) # 8-byte Spill
+ mulxq 8(%rsp), %rbx, %r12 # 8-byte Folded Reload
+ mulxq 56(%rsp), %rdi, %r9 # 8-byte Folded Reload
+ mulxq -16(%rsp), %rsi, %r10 # 8-byte Folded Reload
+ mulxq 32(%rsp), %rdx, %rcx # 8-byte Folded Reload
+ addq %rsi, %rcx
+ adcq %rdi, %r10
+ adcq %rbx, %r9
+ adcq -112(%rsp), %r12 # 8-byte Folded Reload
+ adcq -104(%rsp), %r13 # 8-byte Folded Reload
+ movq -72(%rsp), %rdi # 8-byte Reload
+ adcq -96(%rsp), %rdi # 8-byte Folded Reload
+ movq -48(%rsp), %rsi # 8-byte Reload
+ adcq $0, %rsi
+ addq %rax, %rdx
+ adcq %r8, %rcx
+ adcq %r11, %r10
+ adcq %r14, %r9
+ adcq %rbp, %r12
+ adcq -64(%rsp), %r13 # 8-byte Folded Reload
+ adcq -56(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -72(%rsp) # 8-byte Spill
+ adcq -80(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -48(%rsp) # 8-byte Spill
+ adcq $0, -40(%rsp) # 8-byte Folded Spill
+ adcq $0, -32(%rsp) # 8-byte Folded Spill
+ adcq $0, -8(%rsp) # 8-byte Folded Spill
+ adcq $0, 48(%rsp) # 8-byte Folded Spill
+ adcq $0, 40(%rsp) # 8-byte Folded Spill
+ movq %rcx, %rdx
+ imulq -24(%rsp), %rdx # 8-byte Folded Reload
+ mulxq 16(%rsp), %rsi, %rax # 8-byte Folded Reload
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ movq %rax, -56(%rsp) # 8-byte Spill
+ mulxq %r15, %rsi, %rax
+ movq %rsi, -96(%rsp) # 8-byte Spill
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulxq 24(%rsp), %r8, %r15 # 8-byte Folded Reload
+ mulxq 8(%rsp), %r14, %rbp # 8-byte Folded Reload
+ mulxq 56(%rsp), %rdi, %rbx # 8-byte Folded Reload
+ mulxq -16(%rsp), %rsi, %r11 # 8-byte Folded Reload
+ mulxq 32(%rsp), %rdx, %rax # 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %rdi, %r11
+ adcq %r14, %rbx
+ adcq %r8, %rbp
+ adcq -96(%rsp), %r15 # 8-byte Folded Reload
+ movq -64(%rsp), %rdi # 8-byte Reload
+ adcq -80(%rsp), %rdi # 8-byte Folded Reload
+ movq -56(%rsp), %rsi # 8-byte Reload
+ adcq $0, %rsi
+ addq %rcx, %rdx
+ adcq %r10, %rax
+ adcq %r9, %r11
+ adcq %r12, %rbx
+ adcq %r13, %rbp
+ adcq -72(%rsp), %r15 # 8-byte Folded Reload
+ adcq -48(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -64(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -56(%rsp) # 8-byte Spill
+ adcq $0, -32(%rsp) # 8-byte Folded Spill
+ adcq $0, -8(%rsp) # 8-byte Folded Spill
+ adcq $0, 48(%rsp) # 8-byte Folded Spill
+ adcq $0, 40(%rsp) # 8-byte Folded Spill
+ movq %rax, %rdx
+ imulq -24(%rsp), %rdx # 8-byte Folded Reload
+ movq 16(%rsp), %r10 # 8-byte Reload
+ mulxq %r10, %rsi, %rcx
+ movq %rsi, -72(%rsp) # 8-byte Spill
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ mulxq (%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ movq %rcx, -48(%rsp) # 8-byte Spill
+ mulxq 24(%rsp), %rsi, %rcx # 8-byte Folded Reload
+ movq %rsi, -96(%rsp) # 8-byte Spill
+ mulxq 8(%rsp), %r12, %r13 # 8-byte Folded Reload
+ mulxq 56(%rsp), %r8, %r14 # 8-byte Folded Reload
+ mulxq -16(%rsp), %rsi, %r9 # 8-byte Folded Reload
+ mulxq 32(%rsp), %rdx, %rdi # 8-byte Folded Reload
+ addq %rsi, %rdi
+ adcq %r8, %r9
+ adcq %r12, %r14
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ adcq -80(%rsp), %rcx # 8-byte Folded Reload
+ movq -48(%rsp), %r8 # 8-byte Reload
+ adcq -72(%rsp), %r8 # 8-byte Folded Reload
+ movq -40(%rsp), %rsi # 8-byte Reload
+ adcq $0, %rsi
+ addq %rax, %rdx
+ adcq %r11, %rdi
+ adcq %rbx, %r9
+ adcq %rbp, %r14
+ adcq %r15, %r13
+ adcq -64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -64(%rsp) # 8-byte Spill
+ adcq -56(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, -48(%rsp) # 8-byte Spill
+ adcq -32(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -40(%rsp) # 8-byte Spill
+ adcq $0, -8(%rsp) # 8-byte Folded Spill
+ adcq $0, 48(%rsp) # 8-byte Folded Spill
+ adcq $0, 40(%rsp) # 8-byte Folded Spill
+ movq %rdi, %rdx
+ imulq -24(%rsp), %rdx # 8-byte Folded Reload
+ mulxq %r10, %rcx, %rax
+ movq %rcx, -72(%rsp) # 8-byte Spill
+ movq %rax, -32(%rsp) # 8-byte Spill
+ mulxq (%rsp), %rcx, %rax # 8-byte Folded Reload
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ movq %rax, -56(%rsp) # 8-byte Spill
+ mulxq 24(%rsp), %rax, %rcx # 8-byte Folded Reload
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq 8(%rsp), %r12 # 8-byte Reload
+ mulxq %r12, %rax, %r15
+ movq %rax, -104(%rsp) # 8-byte Spill
+ mulxq 56(%rsp), %rsi, %r11 # 8-byte Folded Reload
+ movq -16(%rsp), %r10 # 8-byte Reload
+ mulxq %r10, %rax, %rbp
+ movq 32(%rsp), %rbx # 8-byte Reload
+ mulxq %rbx, %rdx, %r8
+ addq %rax, %r8
+ adcq %rsi, %rbp
+ adcq -104(%rsp), %r11 # 8-byte Folded Reload
+ adcq -96(%rsp), %r15 # 8-byte Folded Reload
+ adcq -80(%rsp), %rcx # 8-byte Folded Reload
+ movq -56(%rsp), %rsi # 8-byte Reload
+ adcq -72(%rsp), %rsi # 8-byte Folded Reload
+ movq -32(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rdi, %rdx
+ adcq %r9, %r8
+ adcq %r14, %rbp
+ adcq %r13, %r11
+ adcq -64(%rsp), %r15 # 8-byte Folded Reload
+ adcq -48(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -48(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -56(%rsp) # 8-byte Spill
+ adcq -8(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -32(%rsp) # 8-byte Spill
+ adcq $0, 48(%rsp) # 8-byte Folded Spill
+ adcq $0, 40(%rsp) # 8-byte Folded Spill
+ movq -24(%rsp), %rdx # 8-byte Reload
+ imulq %r8, %rdx
+ mulxq %r12, %rax, %r13
+ mulxq %r10, %rcx, %rdi
+ mulxq %rbx, %r12, %r14
+ addq %rcx, %r14
+ mulxq 56(%rsp), %rcx, %r10 # 8-byte Folded Reload
+ adcq %rdi, %rcx
+ adcq %rax, %r10
+ mulxq 24(%rsp), %rax, %r9 # 8-byte Folded Reload
+ adcq %r13, %rax
+ mulxq (%rsp), %rdi, %r13 # 8-byte Folded Reload
+ adcq %r9, %rdi
+ mulxq 16(%rsp), %rdx, %rsi # 8-byte Folded Reload
+ adcq %r13, %rdx
+ adcq $0, %rsi
+ addq %r8, %r12
+ adcq %rbp, %r14
+ adcq %r11, %rcx
+ adcq %r15, %r10
+ adcq -48(%rsp), %rax # 8-byte Folded Reload
+ adcq -56(%rsp), %rdi # 8-byte Folded Reload
+ adcq -32(%rsp), %rdx # 8-byte Folded Reload
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq 40(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ movq %r14, %rbp
+ subq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rcx, %r13
+ sbbq -16(%rsp), %r13 # 8-byte Folded Reload
+ movq %r10, %r8
+ sbbq 56(%rsp), %r8 # 8-byte Folded Reload
+ movq %rax, %r9
+ sbbq 8(%rsp), %r9 # 8-byte Folded Reload
+ movq %rdi, %r11
+ sbbq 24(%rsp), %r11 # 8-byte Folded Reload
+ movq %rdx, %r15
+ sbbq (%rsp), %r15 # 8-byte Folded Reload
+ movq %rsi, %r12
+ sbbq 16(%rsp), %r12 # 8-byte Folded Reload
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %rsi, %r12
+ testb %bl, %bl
+ cmovneq %r14, %rbp
+ movq -88(%rsp), %rsi # 8-byte Reload
+ movq %rbp, (%rsi)
+ cmovneq %rcx, %r13
+ movq %r13, 8(%rsi)
+ cmovneq %r10, %r8
+ movq %r8, 16(%rsi)
+ cmovneq %rax, %r9
+ movq %r9, 24(%rsi)
+ cmovneq %rdi, %r11
+ movq %r11, 32(%rsi)
+ cmovneq %rdx, %r15
+ movq %r15, 40(%rsi)
+ movq %r12, 48(%rsi)
+ addq $64, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end100:
+ .size mcl_fp_montRed7Lbmi2, .Lfunc_end100-mcl_fp_montRed7Lbmi2
+
+ .globl mcl_fp_addPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre7Lbmi2,@function
+mcl_fp_addPre7Lbmi2: # @mcl_fp_addPre7Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r8
+ movq 48(%rsi), %r14
+ movq 40(%rdx), %r9
+ movq 40(%rsi), %r15
+ movq 32(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r12
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rbx
+ adcq 16(%rsi), %r12
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r12, 16(%rdi)
+ adcq %r11, %rax
+ movq %rax, 24(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r9, %r15
+ movq %r15, 40(%rdi)
+ adcq %r8, %r14
+ movq %r14, 48(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end101:
+ .size mcl_fp_addPre7Lbmi2, .Lfunc_end101-mcl_fp_addPre7Lbmi2
+
+ .globl mcl_fp_subPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre7Lbmi2,@function
+mcl_fp_subPre7Lbmi2: # @mcl_fp_subPre7Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r8
+ movq 48(%rsi), %r10
+ movq 40(%rdx), %r9
+ movq 40(%rsi), %r15
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %r12
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %r12
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq 32(%rsi), %rdx
+ movq 24(%rsi), %rsi
+ movq %rbx, (%rdi)
+ movq %r12, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ sbbq %r14, %rdx
+ movq %rdx, 32(%rdi)
+ sbbq %r9, %r15
+ movq %r15, 40(%rdi)
+ sbbq %r8, %r10
+ movq %r10, 48(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end102:
+ .size mcl_fp_subPre7Lbmi2, .Lfunc_end102-mcl_fp_subPre7Lbmi2
+
+ .globl mcl_fp_shr1_7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_7Lbmi2,@function
+mcl_fp_shr1_7Lbmi2: # @mcl_fp_shr1_7Lbmi2
+# BB#0:
+ movq 48(%rsi), %r8
+ movq 40(%rsi), %r9
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %rax
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rdx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rdx
+ movq %rdx, (%rdi)
+ shrdq $1, %rcx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rax, %rcx
+ movq %rcx, 16(%rdi)
+ shrdq $1, %r10, %rax
+ movq %rax, 24(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 32(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 40(%rdi)
+ shrq %r8
+ movq %r8, 48(%rdi)
+ retq
+.Lfunc_end103:
+ .size mcl_fp_shr1_7Lbmi2, .Lfunc_end103-mcl_fp_shr1_7Lbmi2
+
+ .globl mcl_fp_add7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add7Lbmi2,@function
+mcl_fp_add7Lbmi2: # @mcl_fp_add7Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r14
+ movq 48(%rsi), %r8
+ movq 40(%rdx), %r15
+ movq 40(%rsi), %r9
+ movq 32(%rdx), %r12
+ movq 24(%rdx), %r13
+ movq 16(%rdx), %r10
+ movq (%rdx), %r11
+ movq 8(%rdx), %rdx
+ addq (%rsi), %r11
+ adcq 8(%rsi), %rdx
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rbx
+ adcq 16(%rsi), %r10
+ movq %r11, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ adcq %r13, %rax
+ movq %rax, 24(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r15, %r9
+ movq %r9, 40(%rdi)
+ adcq %r14, %r8
+ movq %r8, 48(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %r11
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r10
+ sbbq 24(%rcx), %rax
+ sbbq 32(%rcx), %rbx
+ sbbq 40(%rcx), %r9
+ sbbq 48(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB104_2
+# BB#1: # %nocarry
+ movq %r11, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %rax, 24(%rdi)
+ movq %rbx, 32(%rdi)
+ movq %r9, 40(%rdi)
+ movq %r8, 48(%rdi)
+.LBB104_2: # %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end104:
+ .size mcl_fp_add7Lbmi2, .Lfunc_end104-mcl_fp_add7Lbmi2
+
+ .globl mcl_fp_addNF7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF7Lbmi2,@function
+mcl_fp_addNF7Lbmi2: # @mcl_fp_addNF7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rbp
+ movq 32(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r14
+ movq (%rdx), %r12
+ movq 8(%rdx), %r15
+ addq (%rsi), %r12
+ adcq 8(%rsi), %r15
+ adcq 16(%rsi), %r14
+ adcq 24(%rsi), %r11
+ adcq 32(%rsi), %r10
+ adcq 40(%rsi), %rbp
+ movq %rbp, -8(%rsp) # 8-byte Spill
+ adcq 48(%rsi), %r9
+ movq %r12, %rsi
+ subq (%rcx), %rsi
+ movq %r15, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r14, %rax
+ sbbq 16(%rcx), %rax
+ movq %r11, %rbx
+ sbbq 24(%rcx), %rbx
+ movq %r10, %r13
+ sbbq 32(%rcx), %r13
+ sbbq 40(%rcx), %rbp
+ movq %r9, %r8
+ sbbq 48(%rcx), %r8
+ movq %r8, %rcx
+ sarq $63, %rcx
+ cmovsq %r12, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r15, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r14, %rax
+ movq %rax, 16(%rdi)
+ cmovsq %r11, %rbx
+ movq %rbx, 24(%rdi)
+ cmovsq %r10, %r13
+ movq %r13, 32(%rdi)
+ cmovsq -8(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 40(%rdi)
+ cmovsq %r9, %r8
+ movq %r8, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end105:
+ .size mcl_fp_addNF7Lbmi2, .Lfunc_end105-mcl_fp_addNF7Lbmi2
+
+ .globl mcl_fp_sub7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub7Lbmi2,@function
+mcl_fp_sub7Lbmi2: # @mcl_fp_sub7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r14
+ movq 48(%rsi), %r8
+ movq 40(%rdx), %r15
+ movq 40(%rsi), %r9
+ movq 32(%rdx), %r12
+ movq (%rsi), %rax
+ movq 8(%rsi), %r11
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r11
+ movq 16(%rsi), %r13
+ sbbq 16(%rdx), %r13
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %rsi
+ sbbq 24(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r11, 8(%rdi)
+ movq %r13, 16(%rdi)
+ movq %rsi, 24(%rdi)
+ sbbq %r12, %r10
+ movq %r10, 32(%rdi)
+ sbbq %r15, %r9
+ movq %r9, 40(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 48(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB106_2
+# BB#1: # %carry
+ movq 48(%rcx), %r14
+ movq 40(%rcx), %r15
+ movq 32(%rcx), %r12
+ movq 24(%rcx), %rbx
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rbp
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r13, %rbp
+ movq %rbp, 16(%rdi)
+ adcq %rsi, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r10, %r12
+ movq %r12, 32(%rdi)
+ adcq %r9, %r15
+ movq %r15, 40(%rdi)
+ adcq %r8, %r14
+ movq %r14, 48(%rdi)
+.LBB106_2: # %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end106:
+ .size mcl_fp_sub7Lbmi2, .Lfunc_end106-mcl_fp_sub7Lbmi2
+
+ .globl mcl_fp_subNF7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF7Lbmi2,@function
+mcl_fp_subNF7Lbmi2: # @mcl_fp_subNF7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 48(%rsi), %r12
+ movq 40(%rsi), %rax
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r10
+ movq 16(%rsi), %r11
+ movq (%rsi), %r14
+ movq 8(%rsi), %r15
+ subq (%rdx), %r14
+ sbbq 8(%rdx), %r15
+ sbbq 16(%rdx), %r11
+ sbbq 24(%rdx), %r10
+ sbbq 32(%rdx), %r9
+ sbbq 40(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ sbbq 48(%rdx), %r12
+ movq %r12, %rax
+ sarq $63, %rax
+ movq %rax, %rsi
+ shldq $1, %r12, %rsi
+ andq (%r8), %rsi
+ movq 48(%r8), %r13
+ andq %rax, %r13
+ movq 40(%r8), %rbx
+ andq %rax, %rbx
+ movq 32(%r8), %rdx
+ andq %rax, %rdx
+ movq 24(%r8), %rbp
+ andq %rax, %rbp
+ movq 16(%r8), %rcx
+ andq %rax, %rcx
+ andq 8(%r8), %rax
+ addq %r14, %rsi
+ adcq %r15, %rax
+ movq %rsi, (%rdi)
+ movq %rax, 8(%rdi)
+ adcq %r11, %rcx
+ movq %rcx, 16(%rdi)
+ adcq %r10, %rbp
+ movq %rbp, 24(%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 32(%rdi)
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 40(%rdi)
+ adcq %r12, %r13
+ movq %r13, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end107:
+ .size mcl_fp_subNF7Lbmi2, .Lfunc_end107-mcl_fp_subNF7Lbmi2
+
+ .globl mcl_fpDbl_add7Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add7Lbmi2,@function
+mcl_fpDbl_add7Lbmi2: # @mcl_fpDbl_add7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 104(%rdx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq 96(%rdx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 88(%rdx), %r11
+ movq 80(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r12
+ movq 16(%rdx), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %rbx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %r9
+ adcq 24(%rdx), %r15
+ adcq 32(%rdx), %r12
+ movq 72(%rdx), %r13
+ movq 64(%rdx), %rbp
+ movq %rax, (%rdi)
+ movq 56(%rdx), %r10
+ movq %rbx, 8(%rdi)
+ movq 48(%rdx), %rcx
+ movq 40(%rdx), %rdx
+ movq %r9, 16(%rdi)
+ movq 104(%rsi), %r9
+ movq %r15, 24(%rdi)
+ movq 40(%rsi), %rbx
+ adcq %rdx, %rbx
+ movq 96(%rsi), %r15
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %rdx
+ adcq %rcx, %rdx
+ movq 88(%rsi), %rax
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rcx
+ adcq %r10, %rcx
+ movq 80(%rsi), %r12
+ movq %rdx, 48(%rdi)
+ movq 72(%rsi), %rdx
+ movq 64(%rsi), %rsi
+ adcq %rbp, %rsi
+ adcq %r13, %rdx
+ adcq %r14, %r12
+ adcq %r11, %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, -24(%rsp) # 8-byte Spill
+ adcq -16(%rsp), %r9 # 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq %rcx, %rbx
+ subq (%r8), %rbx
+ movq %rsi, %r10
+ sbbq 8(%r8), %r10
+ movq %rdx, %r11
+ sbbq 16(%r8), %r11
+ movq %r12, %r14
+ sbbq 24(%r8), %r14
+ movq -8(%rsp), %r13 # 8-byte Reload
+ sbbq 32(%r8), %r13
+ sbbq 40(%r8), %r15
+ movq %r9, %rax
+ sbbq 48(%r8), %rax
+ sbbq $0, %rbp
+ andl $1, %ebp
+ cmovneq %rcx, %rbx
+ movq %rbx, 56(%rdi)
+ testb %bpl, %bpl
+ cmovneq %rsi, %r10
+ movq %r10, 64(%rdi)
+ cmovneq %rdx, %r11
+ movq %r11, 72(%rdi)
+ cmovneq %r12, %r14
+ movq %r14, 80(%rdi)
+ cmovneq -8(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, 88(%rdi)
+ cmovneq -24(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, 96(%rdi)
+ cmovneq %r9, %rax
+ movq %rax, 104(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end108:
+ .size mcl_fpDbl_add7Lbmi2, .Lfunc_end108-mcl_fpDbl_add7Lbmi2
+
+ .globl mcl_fpDbl_sub7Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub7Lbmi2,@function
+mcl_fpDbl_sub7Lbmi2: # @mcl_fpDbl_sub7Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 104(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 96(%rdx), %r10
+ movq 88(%rdx), %r14
+ movq 16(%rsi), %rax
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %ecx, %ecx
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %rax
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 80(%rdx), %r13
+ movq 72(%rdx), %rbp
+ movq %r15, (%rdi)
+ movq 64(%rdx), %r9
+ movq %r11, 8(%rdi)
+ movq 56(%rdx), %r15
+ movq %rax, 16(%rdi)
+ movq 48(%rdx), %r11
+ movq 40(%rdx), %rdx
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %rdx, %rbx
+ movq 104(%rsi), %rax
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %r12
+ sbbq %r11, %r12
+ movq 96(%rsi), %r11
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rdx
+ sbbq %r15, %rdx
+ movq 88(%rsi), %r15
+ movq %r12, 48(%rdi)
+ movq 64(%rsi), %rbx
+ sbbq %r9, %rbx
+ movq 80(%rsi), %r12
+ movq 72(%rsi), %r9
+ sbbq %rbp, %r9
+ sbbq %r13, %r12
+ sbbq %r14, %r15
+ sbbq %r10, %r11
+ sbbq -8(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%r8), %r10
+ cmoveq %rcx, %r10
+ testb %bpl, %bpl
+ movq 16(%r8), %rbp
+ cmoveq %rcx, %rbp
+ movq 8(%r8), %rsi
+ cmoveq %rcx, %rsi
+ movq 48(%r8), %r14
+ cmoveq %rcx, %r14
+ movq 40(%r8), %r13
+ cmoveq %rcx, %r13
+ movq 32(%r8), %rax
+ cmoveq %rcx, %rax
+ cmovneq 24(%r8), %rcx
+ addq %rdx, %r10
+ adcq %rbx, %rsi
+ movq %r10, 56(%rdi)
+ movq %rsi, 64(%rdi)
+ adcq %r9, %rbp
+ movq %rbp, 72(%rdi)
+ adcq %r12, %rcx
+ movq %rcx, 80(%rdi)
+ adcq %r15, %rax
+ movq %rax, 88(%rdi)
+ adcq %r11, %r13
+ movq %r13, 96(%rdi)
+ adcq -8(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, 104(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end109:
+ .size mcl_fpDbl_sub7Lbmi2, .Lfunc_end109-mcl_fpDbl_sub7Lbmi2
+
+ .align 16, 0x90
+ .type .LmulPv512x64,@function
+.LmulPv512x64: # @mulPv512x64
+# BB#0:
+ mulxq (%rsi), %rcx, %rax
+ movq %rcx, (%rdi)
+ mulxq 8(%rsi), %rcx, %r8
+ addq %rax, %rcx
+ movq %rcx, 8(%rdi)
+ mulxq 16(%rsi), %rcx, %r9
+ adcq %r8, %rcx
+ movq %rcx, 16(%rdi)
+ mulxq 24(%rsi), %rax, %rcx
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ mulxq 32(%rsi), %rax, %r8
+ adcq %rcx, %rax
+ movq %rax, 32(%rdi)
+ mulxq 40(%rsi), %rcx, %r9
+ adcq %r8, %rcx
+ movq %rcx, 40(%rdi)
+ mulxq 48(%rsi), %rax, %rcx
+ adcq %r9, %rax
+ movq %rax, 48(%rdi)
+ mulxq 56(%rsi), %rax, %rdx
+ adcq %rcx, %rax
+ movq %rax, 56(%rdi)
+ adcq $0, %rdx
+ movq %rdx, 64(%rdi)
+ movq %rdi, %rax
+ retq
+.Lfunc_end110:
+ .size .LmulPv512x64, .Lfunc_end110-.LmulPv512x64
+
+ .globl mcl_fp_mulUnitPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre8Lbmi2,@function
+mcl_fp_mulUnitPre8Lbmi2: # @mcl_fp_mulUnitPre8Lbmi2
+# BB#0:
+ pushq %rbx
+ subq $80, %rsp
+ movq %rdi, %rbx
+ leaq 8(%rsp), %rdi
+ callq .LmulPv512x64
+ movq 72(%rsp), %r8
+ movq 64(%rsp), %r9
+ movq 56(%rsp), %r10
+ movq 48(%rsp), %r11
+ movq 40(%rsp), %rdi
+ movq 32(%rsp), %rax
+ movq 24(%rsp), %rcx
+ movq 8(%rsp), %rdx
+ movq 16(%rsp), %rsi
+ movq %rdx, (%rbx)
+ movq %rsi, 8(%rbx)
+ movq %rcx, 16(%rbx)
+ movq %rax, 24(%rbx)
+ movq %rdi, 32(%rbx)
+ movq %r11, 40(%rbx)
+ movq %r10, 48(%rbx)
+ movq %r9, 56(%rbx)
+ movq %r8, 64(%rbx)
+ addq $80, %rsp
+ popq %rbx
+ retq
+.Lfunc_end111:
+ .size mcl_fp_mulUnitPre8Lbmi2, .Lfunc_end111-mcl_fp_mulUnitPre8Lbmi2
+
+ .globl mcl_fpDbl_mulPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre8Lbmi2,@function
+mcl_fpDbl_mulPre8Lbmi2: # @mcl_fpDbl_mulPre8Lbmi2
+# BB#0:
+ pushq %rbp
+ movq %rsp, %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $200, %rsp
+ movq %rdx, %rbx
+ movq %rsi, %r15
+ movq %rdi, %r14
+ callq mcl_fpDbl_mulPre4Lbmi2@PLT
+ leaq 64(%r14), %rdi
+ leaq 32(%r15), %rsi
+ leaq 32(%rbx), %rdx
+ callq mcl_fpDbl_mulPre4Lbmi2@PLT
+ movq 56(%rbx), %r10
+ movq 48(%rbx), %rcx
+ movq (%rbx), %rdx
+ movq 8(%rbx), %rsi
+ addq 32(%rbx), %rdx
+ adcq 40(%rbx), %rsi
+ adcq 16(%rbx), %rcx
+ adcq 24(%rbx), %r10
+ pushfq
+ popq %r8
+ xorl %r9d, %r9d
+ movq 56(%r15), %rdi
+ movq 48(%r15), %r13
+ movq (%r15), %r12
+ movq 8(%r15), %rbx
+ addq 32(%r15), %r12
+ adcq 40(%r15), %rbx
+ adcq 16(%r15), %r13
+ adcq 24(%r15), %rdi
+ movl $0, %eax
+ cmovbq %r10, %rax
+ movq %rax, -176(%rbp) # 8-byte Spill
+ movl $0, %eax
+ cmovbq %rcx, %rax
+ movq %rax, -184(%rbp) # 8-byte Spill
+ movl $0, %eax
+ cmovbq %rsi, %rax
+ movq %rax, -192(%rbp) # 8-byte Spill
+ movl $0, %eax
+ cmovbq %rdx, %rax
+ movq %rax, -200(%rbp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq %r12, -136(%rbp)
+ movq %rbx, -128(%rbp)
+ movq %r13, -120(%rbp)
+ movq %rdi, -112(%rbp)
+ movq %rdx, -168(%rbp)
+ movq %rsi, -160(%rbp)
+ movq %rcx, -152(%rbp)
+ movq %r10, -144(%rbp)
+ pushq %r8
+ popfq
+ cmovaeq %r9, %rdi
+ movq %rdi, -216(%rbp) # 8-byte Spill
+ cmovaeq %r9, %r13
+ cmovaeq %r9, %rbx
+ cmovaeq %r9, %r12
+ sbbq %rax, %rax
+ movq %rax, -208(%rbp) # 8-byte Spill
+ leaq -104(%rbp), %rdi
+ leaq -136(%rbp), %rsi
+ leaq -168(%rbp), %rdx
+ callq mcl_fpDbl_mulPre4Lbmi2@PLT
+ addq -200(%rbp), %r12 # 8-byte Folded Reload
+ adcq -192(%rbp), %rbx # 8-byte Folded Reload
+ adcq -184(%rbp), %r13 # 8-byte Folded Reload
+ movq -216(%rbp), %r10 # 8-byte Reload
+ adcq -176(%rbp), %r10 # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq -208(%rbp), %rdx # 8-byte Reload
+ andl %edx, %r15d
+ andl $1, %r15d
+ addq -72(%rbp), %r12
+ adcq -64(%rbp), %rbx
+ adcq -56(%rbp), %r13
+ adcq -48(%rbp), %r10
+ adcq %rax, %r15
+ movq -80(%rbp), %rax
+ movq -88(%rbp), %rcx
+ movq -104(%rbp), %rsi
+ movq -96(%rbp), %rdx
+ subq (%r14), %rsi
+ sbbq 8(%r14), %rdx
+ sbbq 16(%r14), %rcx
+ sbbq 24(%r14), %rax
+ movq 32(%r14), %rdi
+ movq %rdi, -184(%rbp) # 8-byte Spill
+ movq 40(%r14), %r8
+ movq %r8, -176(%rbp) # 8-byte Spill
+ sbbq %rdi, %r12
+ sbbq %r8, %rbx
+ movq 48(%r14), %rdi
+ movq %rdi, -192(%rbp) # 8-byte Spill
+ sbbq %rdi, %r13
+ movq 56(%r14), %rdi
+ movq %rdi, -200(%rbp) # 8-byte Spill
+ sbbq %rdi, %r10
+ sbbq $0, %r15
+ movq 64(%r14), %r11
+ subq %r11, %rsi
+ movq 72(%r14), %rdi
+ movq %rdi, -208(%rbp) # 8-byte Spill
+ sbbq %rdi, %rdx
+ movq 80(%r14), %rdi
+ movq %rdi, -216(%rbp) # 8-byte Spill
+ sbbq %rdi, %rcx
+ movq 88(%r14), %rdi
+ movq %rdi, -224(%rbp) # 8-byte Spill
+ sbbq %rdi, %rax
+ movq 96(%r14), %rdi
+ movq %rdi, -232(%rbp) # 8-byte Spill
+ sbbq %rdi, %r12
+ movq 104(%r14), %rdi
+ sbbq %rdi, %rbx
+ movq 112(%r14), %r8
+ sbbq %r8, %r13
+ movq 120(%r14), %r9
+ sbbq %r9, %r10
+ sbbq $0, %r15
+ addq -184(%rbp), %rsi # 8-byte Folded Reload
+ adcq -176(%rbp), %rdx # 8-byte Folded Reload
+ movq %rsi, 32(%r14)
+ adcq -192(%rbp), %rcx # 8-byte Folded Reload
+ movq %rdx, 40(%r14)
+ adcq -200(%rbp), %rax # 8-byte Folded Reload
+ movq %rcx, 48(%r14)
+ adcq %r11, %r12
+ movq %rax, 56(%r14)
+ movq %r12, 64(%r14)
+ adcq -208(%rbp), %rbx # 8-byte Folded Reload
+ movq %rbx, 72(%r14)
+ adcq -216(%rbp), %r13 # 8-byte Folded Reload
+ movq %r13, 80(%r14)
+ adcq -224(%rbp), %r10 # 8-byte Folded Reload
+ movq %r10, 88(%r14)
+ adcq -232(%rbp), %r15 # 8-byte Folded Reload
+ movq %r15, 96(%r14)
+ adcq $0, %rdi
+ movq %rdi, 104(%r14)
+ adcq $0, %r8
+ movq %r8, 112(%r14)
+ adcq $0, %r9
+ movq %r9, 120(%r14)
+ addq $200, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end112:
+ .size mcl_fpDbl_mulPre8Lbmi2, .Lfunc_end112-mcl_fpDbl_mulPre8Lbmi2
+
+ .globl mcl_fpDbl_sqrPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre8Lbmi2,@function
+mcl_fpDbl_sqrPre8Lbmi2: # @mcl_fpDbl_sqrPre8Lbmi2
+# BB#0:
+ pushq %rbp
+ movq %rsp, %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $200, %rsp
+ movq %rsi, %r14
+ movq %rdi, %rbx
+ movq %r14, %rdx
+ callq mcl_fpDbl_mulPre4Lbmi2@PLT
+ leaq 64(%rbx), %rdi
+ leaq 32(%r14), %rsi
+ movq %rsi, %rdx
+ callq mcl_fpDbl_mulPre4Lbmi2@PLT
+ movq (%r14), %r12
+ movq 8(%r14), %r15
+ addq 32(%r14), %r12
+ adcq 40(%r14), %r15
+ pushfq
+ popq %rax
+ movq %r12, -136(%rbp)
+ movq %r12, -168(%rbp)
+ addq %r12, %r12
+ movq %r15, -128(%rbp)
+ movq %r15, -160(%rbp)
+ adcq %r15, %r15
+ pushfq
+ popq %rcx
+ movq 56(%r14), %r13
+ movq 48(%r14), %rdx
+ pushq %rax
+ popfq
+ adcq 16(%r14), %rdx
+ adcq 24(%r14), %r13
+ pushfq
+ popq %r8
+ pushfq
+ popq %rsi
+ pushfq
+ popq %rdi
+ sbbq %rax, %rax
+ movq %rax, -184(%rbp) # 8-byte Spill
+ xorl %eax, %eax
+ pushq %rdi
+ popfq
+ cmovaeq %rax, %r15
+ movq %r15, -176(%rbp) # 8-byte Spill
+ cmovaeq %rax, %r12
+ movq %rdx, -120(%rbp)
+ movq %rdx, -152(%rbp)
+ movq %rdx, %r15
+ pushq %rcx
+ popfq
+ adcq %r15, %r15
+ movq %r13, %r14
+ movq %r13, -112(%rbp)
+ movq %r13, -144(%rbp)
+ adcq %r13, %r13
+ pushq %rsi
+ popfq
+ cmovaeq %rax, %r13
+ cmovaeq %rax, %r15
+ shrq $63, %r14
+ pushq %r8
+ popfq
+ cmovaeq %rax, %r14
+ leaq -104(%rbp), %rdi
+ leaq -136(%rbp), %rsi
+ leaq -168(%rbp), %rdx
+ callq mcl_fpDbl_mulPre4Lbmi2@PLT
+ movq -184(%rbp), %rax # 8-byte Reload
+ andl $1, %eax
+ addq -72(%rbp), %r12
+ movq -176(%rbp), %r8 # 8-byte Reload
+ adcq -64(%rbp), %r8
+ adcq -56(%rbp), %r15
+ adcq -48(%rbp), %r13
+ adcq %r14, %rax
+ movq %rax, %rdi
+ movq -80(%rbp), %rax
+ movq -88(%rbp), %rcx
+ movq -104(%rbp), %rsi
+ movq -96(%rbp), %rdx
+ subq (%rbx), %rsi
+ sbbq 8(%rbx), %rdx
+ sbbq 16(%rbx), %rcx
+ sbbq 24(%rbx), %rax
+ movq 32(%rbx), %r10
+ movq %r10, -184(%rbp) # 8-byte Spill
+ movq 40(%rbx), %r9
+ movq %r9, -176(%rbp) # 8-byte Spill
+ sbbq %r10, %r12
+ sbbq %r9, %r8
+ movq %r8, %r10
+ movq 48(%rbx), %r8
+ movq %r8, -192(%rbp) # 8-byte Spill
+ sbbq %r8, %r15
+ movq 56(%rbx), %r8
+ movq %r8, -200(%rbp) # 8-byte Spill
+ sbbq %r8, %r13
+ sbbq $0, %rdi
+ movq 64(%rbx), %r11
+ subq %r11, %rsi
+ movq 72(%rbx), %r8
+ movq %r8, -208(%rbp) # 8-byte Spill
+ sbbq %r8, %rdx
+ movq 80(%rbx), %r8
+ movq %r8, -216(%rbp) # 8-byte Spill
+ sbbq %r8, %rcx
+ movq 88(%rbx), %r8
+ movq %r8, -224(%rbp) # 8-byte Spill
+ sbbq %r8, %rax
+ movq 96(%rbx), %r8
+ movq %r8, -232(%rbp) # 8-byte Spill
+ sbbq %r8, %r12
+ movq 104(%rbx), %r14
+ sbbq %r14, %r10
+ movq 112(%rbx), %r8
+ sbbq %r8, %r15
+ movq 120(%rbx), %r9
+ sbbq %r9, %r13
+ sbbq $0, %rdi
+ addq -184(%rbp), %rsi # 8-byte Folded Reload
+ adcq -176(%rbp), %rdx # 8-byte Folded Reload
+ movq %rsi, 32(%rbx)
+ adcq -192(%rbp), %rcx # 8-byte Folded Reload
+ movq %rdx, 40(%rbx)
+ adcq -200(%rbp), %rax # 8-byte Folded Reload
+ movq %rcx, 48(%rbx)
+ adcq %r11, %r12
+ movq %rax, 56(%rbx)
+ movq %r12, 64(%rbx)
+ adcq -208(%rbp), %r10 # 8-byte Folded Reload
+ movq %r10, 72(%rbx)
+ adcq -216(%rbp), %r15 # 8-byte Folded Reload
+ movq %r15, 80(%rbx)
+ adcq -224(%rbp), %r13 # 8-byte Folded Reload
+ movq %r13, 88(%rbx)
+ adcq -232(%rbp), %rdi # 8-byte Folded Reload
+ movq %rdi, 96(%rbx)
+ adcq $0, %r14
+ movq %r14, 104(%rbx)
+ adcq $0, %r8
+ movq %r8, 112(%rbx)
+ adcq $0, %r9
+ movq %r9, 120(%rbx)
+ addq $200, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end113:
+ .size mcl_fpDbl_sqrPre8Lbmi2, .Lfunc_end113-mcl_fpDbl_sqrPre8Lbmi2
+
+ .globl mcl_fp_mont8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont8Lbmi2,@function
+mcl_fp_mont8Lbmi2: # @mcl_fp_mont8Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1256, %rsp # imm = 0x4E8
+ movq %rcx, %r13
+ movq %r13, 40(%rsp) # 8-byte Spill
+ movq %rdx, 16(%rsp) # 8-byte Spill
+ movq %rsi, 24(%rsp) # 8-byte Spill
+ movq %rdi, (%rsp) # 8-byte Spill
+ movq -8(%r13), %rbx
+ movq %rbx, 32(%rsp) # 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1184(%rsp), %rdi
+ callq .LmulPv512x64
+ movq 1184(%rsp), %r15
+ movq 1192(%rsp), %r14
+ movq %r15, %rdx
+ imulq %rbx, %rdx
+ movq 1248(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 1240(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 1232(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 1224(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 1216(%rsp), %r12
+ movq 1208(%rsp), %rbx
+ movq 1200(%rsp), %rbp
+ leaq 1112(%rsp), %rdi
+ movq %r13, %rsi
+ callq .LmulPv512x64
+ addq 1112(%rsp), %r15
+ adcq 1120(%rsp), %r14
+ adcq 1128(%rsp), %rbp
+ movq %rbp, 8(%rsp) # 8-byte Spill
+ adcq 1136(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ adcq 1144(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r13 # 8-byte Reload
+ adcq 1152(%rsp), %r13
+ movq 88(%rsp), %rbx # 8-byte Reload
+ adcq 1160(%rsp), %rbx
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 1168(%rsp), %rbp
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 1176(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1040(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %r15d
+ addq 1040(%rsp), %r14
+ movq 8(%rsp), %rax # 8-byte Reload
+ adcq 1048(%rsp), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 1056(%rsp), %rax
+ movq %rax, %r12
+ movq 64(%rsp), %rax # 8-byte Reload
+ adcq 1064(%rsp), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ adcq 1072(%rsp), %r13
+ movq %r13, 72(%rsp) # 8-byte Spill
+ adcq 1080(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ adcq 1088(%rsp), %rbp
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 1096(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ adcq 1104(%rsp), %r15
+ movq %r15, 56(%rsp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq %r14, %rdx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 968(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %r15d
+ addq 968(%rsp), %r14
+ movq 8(%rsp), %r13 # 8-byte Reload
+ adcq 976(%rsp), %r13
+ adcq 984(%rsp), %r12
+ movq %r12, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r14 # 8-byte Reload
+ adcq 992(%rsp), %r14
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 1000(%rsp), %rbx
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 1008(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ adcq 1016(%rsp), %rbp
+ movq %rbp, %r12
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 1024(%rsp), %rbp
+ movq 56(%rsp), %rax # 8-byte Reload
+ adcq 1032(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ adcq $0, %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 896(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %r13, %rcx
+ addq 896(%rsp), %rcx
+ movq 48(%rsp), %r13 # 8-byte Reload
+ adcq 904(%rsp), %r13
+ adcq 912(%rsp), %r14
+ adcq 920(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 928(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ adcq 936(%rsp), %r12
+ movq %r12, 80(%rsp) # 8-byte Spill
+ adcq 944(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r12 # 8-byte Reload
+ adcq 952(%rsp), %r12
+ adcq 960(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq %rcx, %rdx
+ movq %rcx, %rbp
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 824(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %ebx
+ addq 824(%rsp), %rbp
+ adcq 832(%rsp), %r13
+ movq %r13, 48(%rsp) # 8-byte Spill
+ adcq 840(%rsp), %r14
+ movq %r14, 64(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r13 # 8-byte Reload
+ adcq 848(%rsp), %r13
+ movq 88(%rsp), %rbp # 8-byte Reload
+ adcq 856(%rsp), %rbp
+ movq 80(%rsp), %r14 # 8-byte Reload
+ adcq 864(%rsp), %r14
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 872(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ adcq 880(%rsp), %r12
+ adcq 888(%rsp), %r15
+ adcq $0, %rbx
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 752(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 48(%rsp), %rax # 8-byte Reload
+ addq 752(%rsp), %rax
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 760(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 72(%rsp) # 8-byte Spill
+ adcq 776(%rsp), %rbp
+ movq %rbp, 88(%rsp) # 8-byte Spill
+ adcq 784(%rsp), %r14
+ movq %r14, 80(%rsp) # 8-byte Spill
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 792(%rsp), %rbp
+ adcq 800(%rsp), %r12
+ adcq 808(%rsp), %r15
+ adcq 816(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 680(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %r13, %rax
+ andl $1, %eax
+ addq 680(%rsp), %rbx
+ movq 64(%rsp), %r14 # 8-byte Reload
+ adcq 688(%rsp), %r14
+ movq 72(%rsp), %rcx # 8-byte Reload
+ adcq 696(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r13 # 8-byte Reload
+ adcq 704(%rsp), %r13
+ movq 80(%rsp), %rbx # 8-byte Reload
+ adcq 712(%rsp), %rbx
+ adcq 720(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq %r12, %rbp
+ adcq 728(%rsp), %rbp
+ adcq 736(%rsp), %r15
+ movq 48(%rsp), %r12 # 8-byte Reload
+ adcq 744(%rsp), %r12
+ adcq $0, %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 608(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %r14, %rax
+ addq 608(%rsp), %rax
+ movq 72(%rsp), %r14 # 8-byte Reload
+ adcq 616(%rsp), %r14
+ adcq 624(%rsp), %r13
+ movq %r13, 88(%rsp) # 8-byte Spill
+ adcq 632(%rsp), %rbx
+ movq %rbx, %r13
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 640(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ adcq 648(%rsp), %rbp
+ movq %rbp, 56(%rsp) # 8-byte Spill
+ adcq 656(%rsp), %r15
+ adcq 664(%rsp), %r12
+ movq %r12, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 672(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ sbbq %rbp, %rbp
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 536(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %rbp, %rax
+ andl $1, %eax
+ addq 536(%rsp), %rbx
+ adcq 544(%rsp), %r14
+ movq %r14, 72(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rbx # 8-byte Reload
+ adcq 552(%rsp), %rbx
+ adcq 560(%rsp), %r13
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 568(%rsp), %rbp
+ movq 56(%rsp), %r12 # 8-byte Reload
+ adcq 576(%rsp), %r12
+ adcq 584(%rsp), %r15
+ movq 48(%rsp), %rcx # 8-byte Reload
+ adcq 592(%rsp), %rcx
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r14 # 8-byte Reload
+ adcq 600(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 464(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 72(%rsp), %rax # 8-byte Reload
+ addq 464(%rsp), %rax
+ adcq 472(%rsp), %rbx
+ adcq 480(%rsp), %r13
+ movq %r13, 80(%rsp) # 8-byte Spill
+ adcq 488(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ adcq 496(%rsp), %r12
+ adcq 504(%rsp), %r15
+ movq %r15, 72(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r15 # 8-byte Reload
+ adcq 512(%rsp), %r15
+ adcq 520(%rsp), %r14
+ movq %r14, 64(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 528(%rsp), %r14
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %rbp
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 392(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %r13, %rax
+ andl $1, %eax
+ addq 392(%rsp), %rbp
+ adcq 400(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 408(%rsp), %rbp
+ movq 96(%rsp), %rbx # 8-byte Reload
+ adcq 416(%rsp), %rbx
+ adcq 424(%rsp), %r12
+ movq 72(%rsp), %r13 # 8-byte Reload
+ adcq 432(%rsp), %r13
+ adcq 440(%rsp), %r15
+ movq %r15, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r15 # 8-byte Reload
+ adcq 448(%rsp), %r15
+ adcq 456(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 320(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 88(%rsp), %rax # 8-byte Reload
+ addq 320(%rsp), %rax
+ adcq 328(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 336(%rsp), %rbx
+ movq %rbx, 96(%rsp) # 8-byte Spill
+ movq %r12, %rbp
+ adcq 344(%rsp), %rbp
+ adcq 352(%rsp), %r13
+ movq 48(%rsp), %r12 # 8-byte Reload
+ adcq 360(%rsp), %r12
+ adcq 368(%rsp), %r15
+ movq %r15, 64(%rsp) # 8-byte Spill
+ adcq 376(%rsp), %r14
+ movq %r14, 88(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rcx # 8-byte Reload
+ adcq 384(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 248(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %r15d
+ addq 248(%rsp), %rbx
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 256(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 96(%rsp), %r14 # 8-byte Reload
+ adcq 264(%rsp), %r14
+ adcq 272(%rsp), %rbp
+ movq %rbp, 56(%rsp) # 8-byte Spill
+ movq %r13, %rbx
+ adcq 280(%rsp), %rbx
+ movq %r12, %rbp
+ adcq 288(%rsp), %rbp
+ movq 64(%rsp), %r13 # 8-byte Reload
+ adcq 296(%rsp), %r13
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 312(%rsp), %r12
+ adcq $0, %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 176(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 80(%rsp), %rax # 8-byte Reload
+ addq 176(%rsp), %rax
+ adcq 184(%rsp), %r14
+ movq %r14, 96(%rsp) # 8-byte Spill
+ movq 56(%rsp), %rcx # 8-byte Reload
+ adcq 192(%rsp), %rcx
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq 200(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 208(%rsp), %rbp
+ adcq 216(%rsp), %r13
+ movq %r13, 64(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 224(%rsp), %r14
+ adcq 232(%rsp), %r12
+ adcq 240(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq 32(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %r13
+ leaq 104(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %ebx
+ addq 104(%rsp), %r13
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 112(%rsp), %rcx
+ movq 56(%rsp), %rdx # 8-byte Reload
+ adcq 120(%rsp), %rdx
+ movq 72(%rsp), %rsi # 8-byte Reload
+ adcq 128(%rsp), %rsi
+ movq %rbp, %rdi
+ adcq 136(%rsp), %rdi
+ movq %rdi, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r8 # 8-byte Reload
+ adcq 144(%rsp), %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq %r14, %r9
+ adcq 152(%rsp), %r9
+ movq %r9, 88(%rsp) # 8-byte Spill
+ adcq 160(%rsp), %r12
+ adcq 168(%rsp), %r15
+ adcq $0, %rbx
+ movq %rcx, %rax
+ movq %rcx, %r11
+ movq 40(%rsp), %rbp # 8-byte Reload
+ subq (%rbp), %rax
+ movq %rdx, %rcx
+ movq %rdx, %r14
+ sbbq 8(%rbp), %rcx
+ movq %rsi, %rdx
+ movq %rsi, %r13
+ sbbq 16(%rbp), %rdx
+ movq %rdi, %rsi
+ sbbq 24(%rbp), %rsi
+ movq %r8, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %r9, %r10
+ sbbq 40(%rbp), %r10
+ movq %r12, %r8
+ sbbq 48(%rbp), %r8
+ movq %r15, %r9
+ sbbq 56(%rbp), %r9
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %r15, %r9
+ testb %bl, %bl
+ cmovneq %r11, %rax
+ movq (%rsp), %rbx # 8-byte Reload
+ movq %rax, (%rbx)
+ cmovneq %r14, %rcx
+ movq %rcx, 8(%rbx)
+ cmovneq %r13, %rdx
+ movq %rdx, 16(%rbx)
+ cmovneq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovneq 64(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rbx)
+ cmovneq 88(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 40(%rbx)
+ cmovneq %r12, %r8
+ movq %r8, 48(%rbx)
+ movq %r9, 56(%rbx)
+ addq $1256, %rsp # imm = 0x4E8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end114:
+ .size mcl_fp_mont8Lbmi2, .Lfunc_end114-mcl_fp_mont8Lbmi2
+
+ .globl mcl_fp_montNF8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF8Lbmi2,@function
+mcl_fp_montNF8Lbmi2: # @mcl_fp_montNF8Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1240, %rsp # imm = 0x4D8
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ movq %rdx, 16(%rsp) # 8-byte Spill
+ movq %rsi, 24(%rsp) # 8-byte Spill
+ movq %rdi, (%rsp) # 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 32(%rsp) # 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1168(%rsp), %rdi
+ callq .LmulPv512x64
+ movq 1168(%rsp), %r15
+ movq 1176(%rsp), %r12
+ movq %r15, %rdx
+ imulq %rbx, %rdx
+ movq 1232(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 1224(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 1216(%rsp), %r13
+ movq 1208(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 1200(%rsp), %r14
+ movq 1192(%rsp), %rbp
+ movq 1184(%rsp), %rbx
+ leaq 1096(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 1096(%rsp), %r15
+ adcq 1104(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq 1112(%rsp), %rbx
+ adcq 1120(%rsp), %rbp
+ adcq 1128(%rsp), %r14
+ movq %r14, %r12
+ movq 72(%rsp), %r14 # 8-byte Reload
+ adcq 1136(%rsp), %r14
+ adcq 1144(%rsp), %r13
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 1152(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 1160(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1024(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 1088(%rsp), %r15
+ movq 64(%rsp), %rax # 8-byte Reload
+ addq 1024(%rsp), %rax
+ adcq 1032(%rsp), %rbx
+ movq %rbx, 8(%rsp) # 8-byte Spill
+ movq %rbp, %rbx
+ adcq 1040(%rsp), %rbx
+ adcq 1048(%rsp), %r12
+ adcq 1056(%rsp), %r14
+ movq %r14, 72(%rsp) # 8-byte Spill
+ movq %r13, %rbp
+ adcq 1064(%rsp), %rbp
+ movq 80(%rsp), %rcx # 8-byte Reload
+ adcq 1072(%rsp), %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r14 # 8-byte Reload
+ adcq 1080(%rsp), %r14
+ adcq $0, %r15
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 952(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 952(%rsp), %r13
+ movq 8(%rsp), %rax # 8-byte Reload
+ adcq 960(%rsp), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ adcq 968(%rsp), %rbx
+ movq %rbx, 64(%rsp) # 8-byte Spill
+ movq %r12, %rbx
+ adcq 976(%rsp), %rbx
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 984(%rsp), %r12
+ adcq 992(%rsp), %rbp
+ movq %rbp, 40(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 1000(%rsp), %r13
+ movq %r14, %rbp
+ adcq 1008(%rsp), %rbp
+ adcq 1016(%rsp), %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 880(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 944(%rsp), %r14
+ movq 8(%rsp), %rax # 8-byte Reload
+ addq 880(%rsp), %rax
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 888(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ adcq 896(%rsp), %rbx
+ adcq 904(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq 40(%rsp), %rcx # 8-byte Reload
+ adcq 912(%rsp), %rcx
+ movq %rcx, 40(%rsp) # 8-byte Spill
+ adcq 920(%rsp), %r13
+ movq %r13, 80(%rsp) # 8-byte Spill
+ adcq 928(%rsp), %rbp
+ movq %rbp, 48(%rsp) # 8-byte Spill
+ adcq 936(%rsp), %r15
+ adcq $0, %r14
+ movq %rax, %rdx
+ movq %rax, %rbp
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 808(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 808(%rsp), %rbp
+ movq 64(%rsp), %r13 # 8-byte Reload
+ adcq 816(%rsp), %r13
+ movq %rbx, %r12
+ adcq 824(%rsp), %r12
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 832(%rsp), %rbx
+ movq 40(%rsp), %rbp # 8-byte Reload
+ adcq 840(%rsp), %rbp
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 848(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 856(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ adcq 864(%rsp), %r15
+ adcq 872(%rsp), %r14
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 736(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 800(%rsp), %rax
+ movq %r13, %rcx
+ addq 736(%rsp), %rcx
+ adcq 744(%rsp), %r12
+ movq %r12, 40(%rsp) # 8-byte Spill
+ adcq 752(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 760(%rsp), %rbp
+ movq %rbp, %r13
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 768(%rsp), %rbp
+ movq 48(%rsp), %rbx # 8-byte Reload
+ adcq 776(%rsp), %rbx
+ adcq 784(%rsp), %r15
+ adcq 792(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq %rcx, %rdx
+ movq %rcx, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 664(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 664(%rsp), %r12
+ movq 40(%rsp), %rax # 8-byte Reload
+ adcq 672(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rax # 8-byte Reload
+ adcq 680(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ adcq 688(%rsp), %r13
+ adcq 696(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 704(%rsp), %rbx
+ adcq 712(%rsp), %r15
+ adcq 720(%rsp), %r14
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 728(%rsp), %r12
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 592(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 656(%rsp), %rcx
+ movq 40(%rsp), %rax # 8-byte Reload
+ addq 592(%rsp), %rax
+ movq 72(%rsp), %rbp # 8-byte Reload
+ adcq 600(%rsp), %rbp
+ adcq 608(%rsp), %r13
+ movq %r13, 40(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 616(%rsp), %r13
+ adcq 624(%rsp), %rbx
+ adcq 632(%rsp), %r15
+ adcq 640(%rsp), %r14
+ adcq 648(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 520(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 520(%rsp), %r12
+ adcq 528(%rsp), %rbp
+ movq %rbp, 72(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r12 # 8-byte Reload
+ adcq 536(%rsp), %r12
+ movq %r13, %rbp
+ adcq 544(%rsp), %rbp
+ adcq 552(%rsp), %rbx
+ adcq 560(%rsp), %r15
+ adcq 568(%rsp), %r14
+ movq 64(%rsp), %r13 # 8-byte Reload
+ adcq 576(%rsp), %r13
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 584(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 448(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 512(%rsp), %rcx
+ movq 72(%rsp), %rax # 8-byte Reload
+ addq 448(%rsp), %rax
+ adcq 456(%rsp), %r12
+ movq %r12, 40(%rsp) # 8-byte Spill
+ adcq 464(%rsp), %rbp
+ adcq 472(%rsp), %rbx
+ adcq 480(%rsp), %r15
+ adcq 488(%rsp), %r14
+ adcq 496(%rsp), %r13
+ movq %r13, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 504(%rsp), %r13
+ adcq $0, %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 376(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 376(%rsp), %r12
+ movq 40(%rsp), %rax # 8-byte Reload
+ adcq 384(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ adcq 392(%rsp), %rbp
+ adcq 400(%rsp), %rbx
+ adcq 408(%rsp), %r15
+ adcq 416(%rsp), %r14
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 424(%rsp), %r12
+ adcq 432(%rsp), %r13
+ movq 72(%rsp), %rax # 8-byte Reload
+ adcq 440(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 304(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 368(%rsp), %rcx
+ movq 40(%rsp), %rax # 8-byte Reload
+ addq 304(%rsp), %rax
+ adcq 312(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 320(%rsp), %rbx
+ adcq 328(%rsp), %r15
+ adcq 336(%rsp), %r14
+ adcq 344(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq 352(%rsp), %r13
+ movq 72(%rsp), %rbp # 8-byte Reload
+ adcq 360(%rsp), %rbp
+ adcq $0, %rcx
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 232(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 232(%rsp), %r12
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 240(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ adcq 248(%rsp), %rbx
+ adcq 256(%rsp), %r15
+ adcq 264(%rsp), %r14
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 272(%rsp), %r12
+ adcq 280(%rsp), %r13
+ adcq 288(%rsp), %rbp
+ movq %rbp, 72(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rbp # 8-byte Reload
+ adcq 296(%rsp), %rbp
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 160(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 224(%rsp), %rcx
+ movq 80(%rsp), %rax # 8-byte Reload
+ addq 160(%rsp), %rax
+ adcq 168(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ adcq 176(%rsp), %r15
+ adcq 184(%rsp), %r14
+ adcq 192(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq 200(%rsp), %r13
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 208(%rsp), %rbx
+ adcq 216(%rsp), %rbp
+ movq %rbp, %r12
+ adcq $0, %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbp
+ leaq 88(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 88(%rsp), %rbp
+ movq 48(%rsp), %r11 # 8-byte Reload
+ adcq 96(%rsp), %r11
+ adcq 104(%rsp), %r15
+ adcq 112(%rsp), %r14
+ movq 64(%rsp), %rsi # 8-byte Reload
+ adcq 120(%rsp), %rsi
+ movq %rsi, 64(%rsp) # 8-byte Spill
+ adcq 128(%rsp), %r13
+ adcq 136(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 144(%rsp), %r12
+ movq 80(%rsp), %r8 # 8-byte Reload
+ adcq 152(%rsp), %r8
+ movq %r11, %rax
+ movq 56(%rsp), %rbp # 8-byte Reload
+ subq (%rbp), %rax
+ movq %r15, %rcx
+ sbbq 8(%rbp), %rcx
+ movq %r14, %rdx
+ sbbq 16(%rbp), %rdx
+ sbbq 24(%rbp), %rsi
+ movq %r13, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %rbx, %r9
+ sbbq 40(%rbp), %r9
+ movq %r12, %r10
+ sbbq 48(%rbp), %r10
+ movq %rbp, %rbx
+ movq %r8, %rbp
+ sbbq 56(%rbx), %rbp
+ testq %rbp, %rbp
+ cmovsq %r11, %rax
+ movq (%rsp), %rbx # 8-byte Reload
+ movq %rax, (%rbx)
+ cmovsq %r15, %rcx
+ movq %rcx, 8(%rbx)
+ cmovsq %r14, %rdx
+ movq %rdx, 16(%rbx)
+ cmovsq 64(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovsq %r13, %rdi
+ movq %rdi, 32(%rbx)
+ cmovsq 72(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 40(%rbx)
+ cmovsq %r12, %r10
+ movq %r10, 48(%rbx)
+ cmovsq %r8, %rbp
+ movq %rbp, 56(%rbx)
+ addq $1240, %rsp # imm = 0x4D8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end115:
+ .size mcl_fp_montNF8Lbmi2, .Lfunc_end115-mcl_fp_montNF8Lbmi2
+
+ .globl mcl_fp_montRed8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed8Lbmi2,@function
+mcl_fp_montRed8Lbmi2: # @mcl_fp_montRed8Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $776, %rsp # imm = 0x308
+ movq %rdx, %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ movq %rdi, 72(%rsp) # 8-byte Spill
+ movq -8(%rax), %rcx
+ movq %rcx, 128(%rsp) # 8-byte Spill
+ movq (%rsi), %r15
+ movq 8(%rsi), %rdx
+ movq %rdx, 184(%rsp) # 8-byte Spill
+ movq %r15, %rdx
+ imulq %rcx, %rdx
+ movq 120(%rsi), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ movq 112(%rsi), %rcx
+ movq %rcx, 136(%rsp) # 8-byte Spill
+ movq 104(%rsi), %rcx
+ movq %rcx, 120(%rsp) # 8-byte Spill
+ movq 96(%rsi), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ movq 88(%rsi), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 80(%rsi), %rcx
+ movq %rcx, 160(%rsp) # 8-byte Spill
+ movq 72(%rsi), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ movq 64(%rsi), %r13
+ movq 56(%rsi), %rcx
+ movq %rcx, 144(%rsp) # 8-byte Spill
+ movq 48(%rsi), %r14
+ movq 40(%rsi), %rcx
+ movq %rcx, 152(%rsp) # 8-byte Spill
+ movq 32(%rsi), %r12
+ movq 24(%rsi), %rbx
+ movq 16(%rsi), %rbp
+ movq %rax, %rcx
+ movq (%rcx), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ movq 56(%rcx), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 48(%rcx), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq %rcx, %rsi
+ leaq 704(%rsp), %rdi
+ callq .LmulPv512x64
+ addq 704(%rsp), %r15
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 712(%rsp), %rcx
+ adcq 720(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 728(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ adcq 736(%rsp), %r12
+ movq %r12, 104(%rsp) # 8-byte Spill
+ movq 152(%rsp), %rax # 8-byte Reload
+ adcq 744(%rsp), %rax
+ movq %rax, 152(%rsp) # 8-byte Spill
+ adcq 752(%rsp), %r14
+ movq %r14, %r12
+ movq 144(%rsp), %rax # 8-byte Reload
+ adcq 760(%rsp), %rax
+ movq %rax, 144(%rsp) # 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 184(%rsp) # 8-byte Spill
+ adcq $0, 192(%rsp) # 8-byte Folded Spill
+ movq 160(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 176(%rsp) # 8-byte Folded Spill
+ adcq $0, 168(%rsp) # 8-byte Folded Spill
+ adcq $0, 120(%rsp) # 8-byte Folded Spill
+ movq 136(%rsp), %r13 # 8-byte Reload
+ adcq $0, %r13
+ movq 96(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ sbbq %rbx, %rbx
+ movq %rcx, %rbp
+ movq %rbp, %rdx
+ imulq 128(%rsp), %rdx # 8-byte Folded Reload
+ leaq 632(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 632(%rsp), %rbp
+ movq 80(%rsp), %rsi # 8-byte Reload
+ adcq 640(%rsp), %rsi
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 648(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rcx # 8-byte Reload
+ adcq 656(%rsp), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ movq 152(%rsp), %rcx # 8-byte Reload
+ adcq 664(%rsp), %rcx
+ movq %rcx, 152(%rsp) # 8-byte Spill
+ adcq 672(%rsp), %r12
+ movq 144(%rsp), %rcx # 8-byte Reload
+ adcq 680(%rsp), %rcx
+ movq %rcx, 144(%rsp) # 8-byte Spill
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 688(%rsp), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 696(%rsp), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ adcq $0, %r15
+ movq %r15, 160(%rsp) # 8-byte Spill
+ movq 176(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ movq 168(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 120(%rsp) # 8-byte Folded Spill
+ adcq $0, %r13
+ movq %r13, 136(%rsp) # 8-byte Spill
+ adcq $0, %r14
+ movq %r14, 96(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ adcq $0, %rbp
+ movq %rsi, %rdx
+ movq %rsi, %r14
+ imulq 128(%rsp), %rdx # 8-byte Folded Reload
+ leaq 560(%rsp), %rdi
+ movq 112(%rsp), %r13 # 8-byte Reload
+ movq %r13, %rsi
+ callq .LmulPv512x64
+ addq 560(%rsp), %r14
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 568(%rsp), %rcx
+ movq 104(%rsp), %rax # 8-byte Reload
+ adcq 576(%rsp), %rax
+ movq %rax, 104(%rsp) # 8-byte Spill
+ movq 152(%rsp), %rax # 8-byte Reload
+ adcq 584(%rsp), %rax
+ movq %rax, 152(%rsp) # 8-byte Spill
+ adcq 592(%rsp), %r12
+ movq %r12, 88(%rsp) # 8-byte Spill
+ movq 144(%rsp), %r14 # 8-byte Reload
+ adcq 600(%rsp), %r14
+ movq 184(%rsp), %rax # 8-byte Reload
+ adcq 608(%rsp), %rax
+ movq %rax, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rax # 8-byte Reload
+ adcq 616(%rsp), %rax
+ movq %rax, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rax # 8-byte Reload
+ adcq 624(%rsp), %rax
+ movq %rax, 160(%rsp) # 8-byte Spill
+ adcq $0, %rbx
+ movq %rbx, 176(%rsp) # 8-byte Spill
+ adcq $0, %r15
+ movq %r15, 168(%rsp) # 8-byte Spill
+ movq 120(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ movq 136(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 96(%rsp) # 8-byte Folded Spill
+ adcq $0, %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ movq %rcx, %rbp
+ movq %rbp, %rdx
+ movq 128(%rsp), %r12 # 8-byte Reload
+ imulq %r12, %rdx
+ leaq 488(%rsp), %rdi
+ movq %r13, %rsi
+ callq .LmulPv512x64
+ addq 488(%rsp), %rbp
+ movq 104(%rsp), %rax # 8-byte Reload
+ adcq 496(%rsp), %rax
+ movq 152(%rsp), %rbp # 8-byte Reload
+ adcq 504(%rsp), %rbp
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 512(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ adcq 520(%rsp), %r14
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 528(%rsp), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 536(%rsp), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %r13 # 8-byte Reload
+ adcq 544(%rsp), %r13
+ movq 176(%rsp), %rcx # 8-byte Reload
+ adcq 552(%rsp), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ adcq $0, 168(%rsp) # 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 120(%rsp) # 8-byte Spill
+ movq %r15, %rbx
+ adcq $0, %rbx
+ adcq $0, 96(%rsp) # 8-byte Folded Spill
+ adcq $0, 80(%rsp) # 8-byte Folded Spill
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq %r12, %rdx
+ leaq 416(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 416(%rsp), %r15
+ adcq 424(%rsp), %rbp
+ movq %rbp, %rax
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ movq %r14, %r12
+ adcq 440(%rsp), %r12
+ movq 184(%rsp), %r14 # 8-byte Reload
+ adcq 448(%rsp), %r14
+ movq 192(%rsp), %rbp # 8-byte Reload
+ adcq 456(%rsp), %rbp
+ adcq 464(%rsp), %r13
+ movq 176(%rsp), %rcx # 8-byte Reload
+ adcq 472(%rsp), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 168(%rsp), %rcx # 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ adcq $0, 120(%rsp) # 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 136(%rsp) # 8-byte Spill
+ movq 96(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 80(%rsp) # 8-byte Folded Spill
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 128(%rsp), %rdx # 8-byte Folded Reload
+ leaq 344(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 344(%rsp), %rbx
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 352(%rsp), %rax
+ adcq 360(%rsp), %r12
+ movq %r12, 144(%rsp) # 8-byte Spill
+ adcq 368(%rsp), %r14
+ movq %r14, 184(%rsp) # 8-byte Spill
+ adcq 376(%rsp), %rbp
+ movq %rbp, 192(%rsp) # 8-byte Spill
+ adcq 384(%rsp), %r13
+ movq %r13, 160(%rsp) # 8-byte Spill
+ movq 176(%rsp), %r13 # 8-byte Reload
+ adcq 392(%rsp), %r13
+ movq 168(%rsp), %r12 # 8-byte Reload
+ adcq 400(%rsp), %r12
+ movq 120(%rsp), %r14 # 8-byte Reload
+ adcq 408(%rsp), %r14
+ movq 136(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ movq %r15, %rbx
+ adcq $0, %rbx
+ adcq $0, 80(%rsp) # 8-byte Folded Spill
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq 128(%rsp), %rdx # 8-byte Folded Reload
+ leaq 272(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 272(%rsp), %r15
+ movq 144(%rsp), %rcx # 8-byte Reload
+ adcq 280(%rsp), %rcx
+ movq 184(%rsp), %rax # 8-byte Reload
+ adcq 288(%rsp), %rax
+ movq %rax, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rax # 8-byte Reload
+ adcq 296(%rsp), %rax
+ movq %rax, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rax # 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 160(%rsp) # 8-byte Spill
+ adcq 312(%rsp), %r13
+ movq %r13, 176(%rsp) # 8-byte Spill
+ adcq 320(%rsp), %r12
+ movq %r12, 168(%rsp) # 8-byte Spill
+ adcq 328(%rsp), %r14
+ movq %r14, %r13
+ adcq 336(%rsp), %rbp
+ movq %rbp, %r12
+ adcq $0, %rbx
+ movq %rbx, %r14
+ movq 80(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ movq 128(%rsp), %rdx # 8-byte Reload
+ movq %rcx, %rbx
+ imulq %rbx, %rdx
+ leaq 200(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 200(%rsp), %rbx
+ movq 184(%rsp), %rax # 8-byte Reload
+ adcq 208(%rsp), %rax
+ movq %rax, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %r8 # 8-byte Reload
+ adcq 216(%rsp), %r8
+ movq %r8, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rdx # 8-byte Reload
+ adcq 224(%rsp), %rdx
+ movq 176(%rsp), %rsi # 8-byte Reload
+ adcq 232(%rsp), %rsi
+ movq 168(%rsp), %rdi # 8-byte Reload
+ adcq 240(%rsp), %rdi
+ movq %r13, %rbp
+ adcq 248(%rsp), %rbp
+ movq %r12, %rbx
+ adcq 256(%rsp), %rbx
+ movq %rbx, 136(%rsp) # 8-byte Spill
+ movq %r14, %r9
+ adcq 264(%rsp), %r9
+ adcq $0, %r15
+ movq %r15, %r10
+ subq 16(%rsp), %rax # 8-byte Folded Reload
+ movq %r8, %rcx
+ sbbq 8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rdx, %r13
+ sbbq 24(%rsp), %r13 # 8-byte Folded Reload
+ movq %rsi, %r12
+ sbbq 32(%rsp), %r12 # 8-byte Folded Reload
+ movq %rdi, %r14
+ sbbq 40(%rsp), %r14 # 8-byte Folded Reload
+ movq %rbp, %r11
+ sbbq 48(%rsp), %r11 # 8-byte Folded Reload
+ movq %rbx, %r8
+ sbbq 56(%rsp), %r8 # 8-byte Folded Reload
+ movq %r9, %r15
+ sbbq 64(%rsp), %r9 # 8-byte Folded Reload
+ sbbq $0, %r10
+ andl $1, %r10d
+ cmovneq %r15, %r9
+ testb %r10b, %r10b
+ cmovneq 184(%rsp), %rax # 8-byte Folded Reload
+ movq 72(%rsp), %rbx # 8-byte Reload
+ movq %rax, (%rbx)
+ cmovneq 192(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 8(%rbx)
+ cmovneq %rdx, %r13
+ movq %r13, 16(%rbx)
+ cmovneq %rsi, %r12
+ movq %r12, 24(%rbx)
+ cmovneq %rdi, %r14
+ movq %r14, 32(%rbx)
+ cmovneq %rbp, %r11
+ movq %r11, 40(%rbx)
+ cmovneq 136(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, 48(%rbx)
+ movq %r9, 56(%rbx)
+ addq $776, %rsp # imm = 0x308
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end116:
+ .size mcl_fp_montRed8Lbmi2, .Lfunc_end116-mcl_fp_montRed8Lbmi2
+
+ .globl mcl_fp_addPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre8Lbmi2,@function
+mcl_fp_addPre8Lbmi2: # @mcl_fp_addPre8Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 56(%rsi), %r15
+ movq 48(%rdx), %r9
+ movq 48(%rsi), %r12
+ movq 40(%rdx), %r10
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %r14
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rsi
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r14, %rax
+ movq %rax, 24(%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r10, %r13
+ movq %r13, 40(%rdi)
+ adcq %r9, %r12
+ movq %r12, 48(%rdi)
+ adcq %r8, %r15
+ movq %r15, 56(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end117:
+ .size mcl_fp_addPre8Lbmi2, .Lfunc_end117-mcl_fp_addPre8Lbmi2
+
+ .globl mcl_fp_subPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre8Lbmi2,@function
+mcl_fp_subPre8Lbmi2: # @mcl_fp_subPre8Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 56(%rsi), %r15
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %r12
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %r12
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq 48(%rsi), %r13
+ movq 40(%rsi), %rdx
+ movq 32(%rsi), %rbp
+ movq 24(%rsi), %rsi
+ movq %rbx, (%rdi)
+ movq %r12, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ sbbq %r14, %rbp
+ movq %rbp, 32(%rdi)
+ sbbq %r10, %rdx
+ movq %rdx, 40(%rdi)
+ sbbq %r9, %r13
+ movq %r13, 48(%rdi)
+ sbbq %r8, %r15
+ movq %r15, 56(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end118:
+ .size mcl_fp_subPre8Lbmi2, .Lfunc_end118-mcl_fp_subPre8Lbmi2
+
+ .globl mcl_fp_shr1_8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_8Lbmi2,@function
+mcl_fp_shr1_8Lbmi2: # @mcl_fp_shr1_8Lbmi2
+# BB#0:
+ movq 56(%rsi), %r8
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r10
+ movq 32(%rsi), %r11
+ movq 24(%rsi), %rcx
+ movq 16(%rsi), %rdx
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rax
+ movq %rax, (%rdi)
+ shrdq $1, %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 16(%rdi)
+ shrdq $1, %r11, %rcx
+ movq %rcx, 24(%rdi)
+ shrdq $1, %r10, %r11
+ movq %r11, 32(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 40(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 48(%rdi)
+ shrq %r8
+ movq %r8, 56(%rdi)
+ retq
+.Lfunc_end119:
+ .size mcl_fp_shr1_8Lbmi2, .Lfunc_end119-mcl_fp_shr1_8Lbmi2
+
+ .globl mcl_fp_add8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add8Lbmi2,@function
+mcl_fp_add8Lbmi2: # @mcl_fp_add8Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r15
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r12
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %r11
+ movq 32(%rsi), %r10
+ movq (%rdx), %r14
+ movq 8(%rdx), %rbx
+ addq (%rsi), %r14
+ adcq 8(%rsi), %rbx
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r11
+ movq 40(%rdx), %rsi
+ adcq 32(%rdx), %r10
+ movq %r14, (%rdi)
+ movq %rbx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r10, 32(%rdi)
+ adcq %r13, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r12, %r9
+ movq %r9, 48(%rdi)
+ adcq %r15, %r8
+ movq %r8, 56(%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %r14
+ sbbq 8(%rcx), %rbx
+ sbbq 16(%rcx), %rax
+ sbbq 24(%rcx), %r11
+ sbbq 32(%rcx), %r10
+ sbbq 40(%rcx), %rsi
+ sbbq 48(%rcx), %r9
+ sbbq 56(%rcx), %r8
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne .LBB120_2
+# BB#1: # %nocarry
+ movq %r14, (%rdi)
+ movq %rbx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r10, 32(%rdi)
+ movq %rsi, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %r8, 56(%rdi)
+.LBB120_2: # %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end120:
+ .size mcl_fp_add8Lbmi2, .Lfunc_end120-mcl_fp_add8Lbmi2
+
+ .globl mcl_fp_addNF8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF8Lbmi2,@function
+mcl_fp_addNF8Lbmi2: # @mcl_fp_addNF8Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 48(%rdx), %rbp
+ movq 40(%rdx), %rbx
+ movq 32(%rdx), %rax
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r15
+ movq (%rdx), %r13
+ movq 8(%rdx), %r12
+ addq (%rsi), %r13
+ adcq 8(%rsi), %r12
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %r11
+ adcq 32(%rsi), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ adcq 40(%rsi), %rbx
+ movq %rbx, -16(%rsp) # 8-byte Spill
+ movq %rbx, %r9
+ adcq 48(%rsi), %rbp
+ movq %rbp, -8(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ adcq 56(%rsi), %r8
+ movq %r13, %rsi
+ subq (%rcx), %rsi
+ movq %r12, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r15, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r11, %r14
+ sbbq 24(%rcx), %r14
+ movq %r10, %rbp
+ sbbq 32(%rcx), %rbp
+ movq %r9, %r10
+ sbbq 40(%rcx), %r10
+ movq %rax, %r9
+ sbbq 48(%rcx), %r9
+ movq %r8, %rax
+ sbbq 56(%rcx), %rax
+ testq %rax, %rax
+ cmovsq %r13, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r12, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r15, %rbx
+ movq %rbx, 16(%rdi)
+ cmovsq %r11, %r14
+ movq %r14, 24(%rdi)
+ cmovsq -24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 32(%rdi)
+ cmovsq -16(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 40(%rdi)
+ cmovsq -8(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end121:
+ .size mcl_fp_addNF8Lbmi2, .Lfunc_end121-mcl_fp_addNF8Lbmi2
+
+ .globl mcl_fp_sub8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub8Lbmi2,@function
+mcl_fp_sub8Lbmi2: # @mcl_fp_sub8Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r12
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r13
+ movq (%rsi), %rax
+ movq 8(%rsi), %r10
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r10
+ movq 16(%rsi), %r11
+ sbbq 16(%rdx), %r11
+ movq 24(%rsi), %r15
+ sbbq 24(%rdx), %r15
+ movq 32(%rsi), %r14
+ sbbq 32(%rdx), %r14
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %rsi
+ sbbq 40(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r10, 8(%rdi)
+ movq %r11, 16(%rdi)
+ movq %r15, 24(%rdi)
+ movq %r14, 32(%rdi)
+ movq %rsi, 40(%rdi)
+ sbbq %r13, %r9
+ movq %r9, 48(%rdi)
+ sbbq %r12, %r8
+ movq %r8, 56(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB122_2
+# BB#1: # %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ movq 8(%rcx), %rax
+ adcq %r10, %rax
+ movq %rax, 8(%rdi)
+ movq 16(%rcx), %rax
+ adcq %r11, %rax
+ movq %rax, 16(%rdi)
+ movq 24(%rcx), %rax
+ adcq %r15, %rax
+ movq %rax, 24(%rdi)
+ movq 32(%rcx), %rax
+ adcq %r14, %rax
+ movq %rax, 32(%rdi)
+ movq 40(%rcx), %rax
+ adcq %rsi, %rax
+ movq %rax, 40(%rdi)
+ movq 48(%rcx), %rax
+ adcq %r9, %rax
+ movq %rax, 48(%rdi)
+ movq 56(%rcx), %rax
+ adcq %r8, %rax
+ movq %rax, 56(%rdi)
+.LBB122_2: # %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end122:
+ .size mcl_fp_sub8Lbmi2, .Lfunc_end122-mcl_fp_sub8Lbmi2
+
+ .globl mcl_fp_subNF8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF8Lbmi2,@function
+mcl_fp_subNF8Lbmi2: # @mcl_fp_subNF8Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq %rdi, %r9
+ movq 56(%rsi), %r14
+ movq 48(%rsi), %rax
+ movq 40(%rsi), %rcx
+ movq 32(%rsi), %rdi
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r15
+ movq (%rsi), %r13
+ movq 8(%rsi), %r12
+ subq (%rdx), %r13
+ sbbq 8(%rdx), %r12
+ sbbq 16(%rdx), %r15
+ sbbq 24(%rdx), %r11
+ sbbq 32(%rdx), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ sbbq 40(%rdx), %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ sbbq 48(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ sbbq 56(%rdx), %r14
+ movq %r14, %rsi
+ sarq $63, %rsi
+ movq 56(%r8), %r10
+ andq %rsi, %r10
+ movq 48(%r8), %rbx
+ andq %rsi, %rbx
+ movq 40(%r8), %rdi
+ andq %rsi, %rdi
+ movq 32(%r8), %rbp
+ andq %rsi, %rbp
+ movq 24(%r8), %rdx
+ andq %rsi, %rdx
+ movq 16(%r8), %rcx
+ andq %rsi, %rcx
+ movq 8(%r8), %rax
+ andq %rsi, %rax
+ andq (%r8), %rsi
+ addq %r13, %rsi
+ adcq %r12, %rax
+ movq %rsi, (%r9)
+ adcq %r15, %rcx
+ movq %rax, 8(%r9)
+ movq %rcx, 16(%r9)
+ adcq %r11, %rdx
+ movq %rdx, 24(%r9)
+ adcq -24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 32(%r9)
+ adcq -16(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 40(%r9)
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 48(%r9)
+ adcq %r14, %r10
+ movq %r10, 56(%r9)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end123:
+ .size mcl_fp_subNF8Lbmi2, .Lfunc_end123-mcl_fp_subNF8Lbmi2
+
+ .globl mcl_fpDbl_add8Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add8Lbmi2,@function
+mcl_fpDbl_add8Lbmi2: # @mcl_fpDbl_add8Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 120(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 112(%rdx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 104(%rdx), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 96(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r11
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %r15
+ adcq 32(%rdx), %r11
+ movq 88(%rdx), %rbp
+ movq 80(%rdx), %r13
+ movq %rbx, (%rdi)
+ movq 72(%rdx), %r10
+ movq %rax, 8(%rdi)
+ movq 64(%rdx), %r9
+ movq %r12, 16(%rdi)
+ movq 40(%rdx), %r12
+ movq %r15, 24(%rdi)
+ movq 40(%rsi), %rbx
+ adcq %r12, %rbx
+ movq 56(%rdx), %r15
+ movq 48(%rdx), %r12
+ movq %r11, 32(%rdi)
+ movq 48(%rsi), %rdx
+ adcq %r12, %rdx
+ movq 120(%rsi), %r12
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rax
+ adcq %r15, %rax
+ movq 112(%rsi), %rcx
+ movq %rdx, 48(%rdi)
+ movq 64(%rsi), %rbx
+ adcq %r9, %rbx
+ movq 104(%rsi), %rdx
+ movq %rax, 56(%rdi)
+ movq 72(%rsi), %r9
+ adcq %r10, %r9
+ movq 80(%rsi), %r11
+ adcq %r13, %r11
+ movq 96(%rsi), %rax
+ movq 88(%rsi), %r15
+ adcq %rbp, %r15
+ adcq %r14, %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rdx, %rax
+ adcq -32(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -32(%rsp) # 8-byte Spill
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ adcq -8(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, -8(%rsp) # 8-byte Spill
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq %rbx, %rsi
+ subq (%r8), %rsi
+ movq %r9, %rdx
+ sbbq 8(%r8), %rdx
+ movq %r11, %r10
+ sbbq 16(%r8), %r10
+ movq %r15, %r14
+ sbbq 24(%r8), %r14
+ movq -16(%rsp), %r13 # 8-byte Reload
+ sbbq 32(%r8), %r13
+ movq %rax, %r12
+ sbbq 40(%r8), %r12
+ movq %rcx, %rax
+ sbbq 48(%r8), %rax
+ movq -8(%rsp), %rcx # 8-byte Reload
+ sbbq 56(%r8), %rcx
+ sbbq $0, %rbp
+ andl $1, %ebp
+ cmovneq %rbx, %rsi
+ movq %rsi, 64(%rdi)
+ testb %bpl, %bpl
+ cmovneq %r9, %rdx
+ movq %rdx, 72(%rdi)
+ cmovneq %r11, %r10
+ movq %r10, 80(%rdi)
+ cmovneq %r15, %r14
+ movq %r14, 88(%rdi)
+ cmovneq -16(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, 96(%rdi)
+ cmovneq -32(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, 104(%rdi)
+ cmovneq -24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 112(%rdi)
+ cmovneq -8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 120(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end124:
+ .size mcl_fpDbl_add8Lbmi2, .Lfunc_end124-mcl_fpDbl_add8Lbmi2
+
+ .globl mcl_fpDbl_sub8Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub8Lbmi2,@function
+mcl_fpDbl_sub8Lbmi2: # @mcl_fpDbl_sub8Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r15
+ movq 120(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 112(%rdx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq 104(%rdx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r9
+ movq (%rsi), %r12
+ movq 8(%rsi), %r14
+ xorl %r8d, %r8d
+ subq (%rdx), %r12
+ sbbq 8(%rdx), %r14
+ sbbq 16(%rdx), %r9
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r13
+ sbbq 32(%rdx), %r13
+ movq 96(%rdx), %rbp
+ movq 88(%rdx), %r11
+ movq %r12, (%rdi)
+ movq 80(%rdx), %r12
+ movq %r14, 8(%rdi)
+ movq 72(%rdx), %r10
+ movq %r9, 16(%rdi)
+ movq 40(%rdx), %r9
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %r9, %rbx
+ movq 48(%rdx), %r9
+ movq %r13, 32(%rdi)
+ movq 48(%rsi), %r14
+ sbbq %r9, %r14
+ movq 64(%rdx), %r13
+ movq 56(%rdx), %r9
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rdx
+ sbbq %r9, %rdx
+ movq 120(%rsi), %rcx
+ movq %r14, 48(%rdi)
+ movq 64(%rsi), %rbx
+ sbbq %r13, %rbx
+ movq 112(%rsi), %rax
+ movq %rdx, 56(%rdi)
+ movq 72(%rsi), %r9
+ sbbq %r10, %r9
+ movq 80(%rsi), %r13
+ sbbq %r12, %r13
+ movq 88(%rsi), %r12
+ sbbq %r11, %r12
+ movq 104(%rsi), %rdx
+ movq 96(%rsi), %r14
+ sbbq %rbp, %r14
+ sbbq -24(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ sbbq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -16(%rsp) # 8-byte Spill
+ sbbq -8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -8(%rsp) # 8-byte Spill
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%r15), %r11
+ cmoveq %r8, %r11
+ testb %bpl, %bpl
+ movq 16(%r15), %rbp
+ cmoveq %r8, %rbp
+ movq 8(%r15), %rsi
+ cmoveq %r8, %rsi
+ movq 56(%r15), %r10
+ cmoveq %r8, %r10
+ movq 48(%r15), %rdx
+ cmoveq %r8, %rdx
+ movq 40(%r15), %rcx
+ cmoveq %r8, %rcx
+ movq 32(%r15), %rax
+ cmoveq %r8, %rax
+ cmovneq 24(%r15), %r8
+ addq %rbx, %r11
+ adcq %r9, %rsi
+ movq %r11, 64(%rdi)
+ adcq %r13, %rbp
+ movq %rsi, 72(%rdi)
+ movq %rbp, 80(%rdi)
+ adcq %r12, %r8
+ movq %r8, 88(%rdi)
+ adcq %r14, %rax
+ movq %rax, 96(%rdi)
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 104(%rdi)
+ adcq -16(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 112(%rdi)
+ adcq -8(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 120(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end125:
+ .size mcl_fpDbl_sub8Lbmi2, .Lfunc_end125-mcl_fpDbl_sub8Lbmi2
+
+ .align 16, 0x90
+ .type .LmulPv576x64,@function
+.LmulPv576x64: # @mulPv576x64
+# BB#0:
+ mulxq (%rsi), %rcx, %rax
+ movq %rcx, (%rdi)
+ mulxq 8(%rsi), %rcx, %r8
+ addq %rax, %rcx
+ movq %rcx, 8(%rdi)
+ mulxq 16(%rsi), %rcx, %r9
+ adcq %r8, %rcx
+ movq %rcx, 16(%rdi)
+ mulxq 24(%rsi), %rax, %rcx
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ mulxq 32(%rsi), %rax, %r8
+ adcq %rcx, %rax
+ movq %rax, 32(%rdi)
+ mulxq 40(%rsi), %rcx, %r9
+ adcq %r8, %rcx
+ movq %rcx, 40(%rdi)
+ mulxq 48(%rsi), %rax, %rcx
+ adcq %r9, %rax
+ movq %rax, 48(%rdi)
+ mulxq 56(%rsi), %rax, %r8
+ adcq %rcx, %rax
+ movq %rax, 56(%rdi)
+ mulxq 64(%rsi), %rax, %rcx
+ adcq %r8, %rax
+ movq %rax, 64(%rdi)
+ adcq $0, %rcx
+ movq %rcx, 72(%rdi)
+ movq %rdi, %rax
+ retq
+.Lfunc_end126:
+ .size .LmulPv576x64, .Lfunc_end126-.LmulPv576x64
+
+ .globl mcl_fp_mulUnitPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre9Lbmi2,@function
+mcl_fp_mulUnitPre9Lbmi2: # @mcl_fp_mulUnitPre9Lbmi2
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ subq $88, %rsp
+ movq %rdi, %rbx
+ leaq 8(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 80(%rsp), %r8
+ movq 72(%rsp), %r9
+ movq 64(%rsp), %r10
+ movq 56(%rsp), %r11
+ movq 48(%rsp), %r14
+ movq 40(%rsp), %rax
+ movq 32(%rsp), %rcx
+ movq 24(%rsp), %rdx
+ movq 8(%rsp), %rsi
+ movq 16(%rsp), %rdi
+ movq %rsi, (%rbx)
+ movq %rdi, 8(%rbx)
+ movq %rdx, 16(%rbx)
+ movq %rcx, 24(%rbx)
+ movq %rax, 32(%rbx)
+ movq %r14, 40(%rbx)
+ movq %r11, 48(%rbx)
+ movq %r10, 56(%rbx)
+ movq %r9, 64(%rbx)
+ movq %r8, 72(%rbx)
+ addq $88, %rsp
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end127:
+ .size mcl_fp_mulUnitPre9Lbmi2, .Lfunc_end127-mcl_fp_mulUnitPre9Lbmi2
+
+ .globl mcl_fpDbl_mulPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre9Lbmi2,@function
+mcl_fpDbl_mulPre9Lbmi2: # @mcl_fpDbl_mulPre9Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $808, %rsp # imm = 0x328
+ movq %rdx, %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq %rsi, 72(%rsp) # 8-byte Spill
+ movq %rdi, %r12
+ movq %r12, 80(%rsp) # 8-byte Spill
+ movq (%rax), %rdx
+ movq %rax, %rbx
+ leaq 728(%rsp), %rdi
+ movq %rsi, %rbp
+ callq .LmulPv576x64
+ movq 800(%rsp), %r13
+ movq 792(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 784(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 776(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 768(%rsp), %rax
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq 760(%rsp), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ movq 752(%rsp), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ movq 744(%rsp), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq 728(%rsp), %rax
+ movq 736(%rsp), %r14
+ movq %rax, (%r12)
+ movq 8(%rbx), %rdx
+ leaq 648(%rsp), %rdi
+ movq %rbp, %rsi
+ callq .LmulPv576x64
+ movq 720(%rsp), %r8
+ movq 712(%rsp), %rcx
+ movq 704(%rsp), %rdx
+ movq 696(%rsp), %rsi
+ movq 688(%rsp), %rdi
+ movq 680(%rsp), %rbp
+ addq 648(%rsp), %r14
+ movq 672(%rsp), %rax
+ movq 656(%rsp), %rbx
+ movq 664(%rsp), %r15
+ movq %r14, 8(%r12)
+ adcq 8(%rsp), %rbx # 8-byte Folded Reload
+ adcq 16(%rsp), %r15 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, %r14
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 24(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 32(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 40(%rsp) # 8-byte Spill
+ adcq %r13, %rcx
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r13 # 8-byte Reload
+ movq 16(%r13), %rdx
+ leaq 568(%rsp), %rdi
+ movq 72(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 640(%rsp), %r8
+ movq 632(%rsp), %r9
+ movq 624(%rsp), %r10
+ movq 616(%rsp), %rdi
+ movq 608(%rsp), %rbp
+ movq 600(%rsp), %rcx
+ addq 568(%rsp), %rbx
+ movq 592(%rsp), %rdx
+ movq 576(%rsp), %r12
+ movq 584(%rsp), %rsi
+ movq 80(%rsp), %rax # 8-byte Reload
+ movq %rbx, 16(%rax)
+ adcq %r15, %r12
+ adcq %r14, %rsi
+ movq %rsi, (%rsp) # 8-byte Spill
+ adcq 16(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 24(%r13), %rdx
+ leaq 488(%rsp), %rdi
+ movq 72(%rsp), %r15 # 8-byte Reload
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 560(%rsp), %r8
+ movq 552(%rsp), %rcx
+ movq 544(%rsp), %rdx
+ movq 536(%rsp), %rsi
+ movq 528(%rsp), %rdi
+ movq 520(%rsp), %rbp
+ addq 488(%rsp), %r12
+ movq 512(%rsp), %rax
+ movq 496(%rsp), %rbx
+ movq 504(%rsp), %r13
+ movq 80(%rsp), %r14 # 8-byte Reload
+ movq %r12, 24(%r14)
+ adcq (%rsp), %rbx # 8-byte Folded Reload
+ adcq 8(%rsp), %r13 # 8-byte Folded Reload
+ adcq 16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r12 # 8-byte Reload
+ movq 32(%r12), %rdx
+ leaq 408(%rsp), %rdi
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 480(%rsp), %r8
+ movq 472(%rsp), %r9
+ movq 464(%rsp), %rdx
+ movq 456(%rsp), %rsi
+ movq 448(%rsp), %rdi
+ movq 440(%rsp), %rbp
+ addq 408(%rsp), %rbx
+ movq 432(%rsp), %rax
+ movq 416(%rsp), %r15
+ movq 424(%rsp), %rcx
+ movq %rbx, 32(%r14)
+ adcq %r13, %r15
+ adcq 8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, (%rsp) # 8-byte Spill
+ adcq 16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq %r12, %r14
+ movq 40(%r14), %rdx
+ leaq 328(%rsp), %rdi
+ movq 72(%rsp), %r13 # 8-byte Reload
+ movq %r13, %rsi
+ callq .LmulPv576x64
+ movq 400(%rsp), %r8
+ movq 392(%rsp), %r9
+ movq 384(%rsp), %rsi
+ movq 376(%rsp), %rdi
+ movq 368(%rsp), %rbx
+ movq 360(%rsp), %rbp
+ addq 328(%rsp), %r15
+ movq 352(%rsp), %rcx
+ movq 336(%rsp), %r12
+ movq 344(%rsp), %rdx
+ movq 80(%rsp), %rax # 8-byte Reload
+ movq %r15, 40(%rax)
+ adcq (%rsp), %r12 # 8-byte Folded Reload
+ adcq 8(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, (%rsp) # 8-byte Spill
+ adcq 16(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 48(%r14), %rdx
+ leaq 248(%rsp), %rdi
+ movq %r13, %rsi
+ movq %r13, %r15
+ callq .LmulPv576x64
+ movq 320(%rsp), %r8
+ movq 312(%rsp), %r9
+ movq 304(%rsp), %rsi
+ movq 296(%rsp), %rdi
+ movq 288(%rsp), %rbx
+ movq 280(%rsp), %rbp
+ addq 248(%rsp), %r12
+ movq 272(%rsp), %rcx
+ movq 256(%rsp), %r13
+ movq 264(%rsp), %rdx
+ movq 80(%rsp), %rax # 8-byte Reload
+ movq %r12, 48(%rax)
+ adcq (%rsp), %r13 # 8-byte Folded Reload
+ adcq 8(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, (%rsp) # 8-byte Spill
+ adcq 16(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 56(%r14), %rdx
+ leaq 168(%rsp), %rdi
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 240(%rsp), %rcx
+ movq 232(%rsp), %rdx
+ movq 224(%rsp), %rsi
+ movq 216(%rsp), %rdi
+ movq 208(%rsp), %rbx
+ addq 168(%rsp), %r13
+ movq 200(%rsp), %r12
+ movq 192(%rsp), %rbp
+ movq 176(%rsp), %r14
+ movq 184(%rsp), %r15
+ movq 80(%rsp), %rax # 8-byte Reload
+ movq %r13, 56(%rax)
+ adcq (%rsp), %r14 # 8-byte Folded Reload
+ adcq 8(%rsp), %r15 # 8-byte Folded Reload
+ adcq 16(%rsp), %rbp # 8-byte Folded Reload
+ adcq 24(%rsp), %r12 # 8-byte Folded Reload
+ adcq 32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %r13
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ movq 64(%rsp), %rax # 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 88(%rsp), %rdi
+ movq 72(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 88(%rsp), %r14
+ adcq 96(%rsp), %r15
+ movq 160(%rsp), %r8
+ adcq 104(%rsp), %rbp
+ movq 152(%rsp), %r9
+ movq 144(%rsp), %rdx
+ movq 136(%rsp), %rsi
+ movq 128(%rsp), %rdi
+ movq 120(%rsp), %rbx
+ movq 112(%rsp), %rax
+ movq 80(%rsp), %rcx # 8-byte Reload
+ movq %r14, 64(%rcx)
+ movq %r15, 72(%rcx)
+ adcq %r12, %rax
+ movq %rbp, 80(%rcx)
+ movq %rax, 88(%rcx)
+ adcq %r13, %rbx
+ movq %rbx, 96(%rcx)
+ adcq 32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 104(%rcx)
+ adcq 40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 112(%rcx)
+ adcq 48(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 120(%rcx)
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 128(%rcx)
+ adcq $0, %r8
+ movq %r8, 136(%rcx)
+ addq $808, %rsp # imm = 0x328
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end128:
+ .size mcl_fpDbl_mulPre9Lbmi2, .Lfunc_end128-mcl_fpDbl_mulPre9Lbmi2
+
+ .globl mcl_fpDbl_sqrPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre9Lbmi2,@function
+mcl_fpDbl_sqrPre9Lbmi2: # @mcl_fpDbl_sqrPre9Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $808, %rsp # imm = 0x328
+ movq %rsi, %r15
+ movq %r15, 80(%rsp) # 8-byte Spill
+ movq %rdi, %r14
+ movq %r14, 72(%rsp) # 8-byte Spill
+ movq (%r15), %rdx
+ leaq 728(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 800(%rsp), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 792(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 784(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 776(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 768(%rsp), %rax
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq 760(%rsp), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ movq 752(%rsp), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ movq 744(%rsp), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq 728(%rsp), %rax
+ movq 736(%rsp), %r12
+ movq %rax, (%r14)
+ movq 8(%r15), %rdx
+ leaq 648(%rsp), %rdi
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 720(%rsp), %r8
+ movq 712(%rsp), %rcx
+ movq 704(%rsp), %rdx
+ movq 696(%rsp), %rsi
+ movq 688(%rsp), %rdi
+ movq 680(%rsp), %rbp
+ addq 648(%rsp), %r12
+ movq 672(%rsp), %rax
+ movq 656(%rsp), %rbx
+ movq 664(%rsp), %r13
+ movq %r12, 8(%r14)
+ adcq 8(%rsp), %rbx # 8-byte Folded Reload
+ adcq 16(%rsp), %r13 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 16(%r15), %rdx
+ leaq 568(%rsp), %rdi
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 640(%rsp), %r8
+ movq 632(%rsp), %rcx
+ movq 624(%rsp), %rdx
+ movq 616(%rsp), %rsi
+ movq 608(%rsp), %rdi
+ movq 600(%rsp), %rbp
+ addq 568(%rsp), %rbx
+ movq 592(%rsp), %rax
+ movq 576(%rsp), %r14
+ movq 584(%rsp), %r12
+ movq 72(%rsp), %r15 # 8-byte Reload
+ movq %rbx, 16(%r15)
+ adcq %r13, %r14
+ adcq 16(%rsp), %r12 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 24(%rsi), %rdx
+ leaq 488(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 560(%rsp), %r8
+ movq 552(%rsp), %rcx
+ movq 544(%rsp), %rdx
+ movq 536(%rsp), %rsi
+ movq 528(%rsp), %rdi
+ movq 520(%rsp), %rbp
+ addq 488(%rsp), %r14
+ movq 512(%rsp), %rax
+ movq 496(%rsp), %rbx
+ movq 504(%rsp), %r13
+ movq %r14, 24(%r15)
+ adcq %r12, %rbx
+ adcq 16(%rsp), %r13 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 32(%rsi), %rdx
+ leaq 408(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 480(%rsp), %r8
+ movq 472(%rsp), %rcx
+ movq 464(%rsp), %rdx
+ movq 456(%rsp), %rsi
+ movq 448(%rsp), %rdi
+ movq 440(%rsp), %rbp
+ addq 408(%rsp), %rbx
+ movq 432(%rsp), %rax
+ movq 416(%rsp), %r14
+ movq 424(%rsp), %r12
+ movq %rbx, 32(%r15)
+ adcq %r13, %r14
+ adcq 16(%rsp), %r12 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 40(%rsi), %rdx
+ leaq 328(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 400(%rsp), %r8
+ movq 392(%rsp), %rcx
+ movq 384(%rsp), %rdx
+ movq 376(%rsp), %rsi
+ movq 368(%rsp), %rdi
+ movq 360(%rsp), %rbp
+ addq 328(%rsp), %r14
+ movq 352(%rsp), %rax
+ movq 336(%rsp), %rbx
+ movq 344(%rsp), %r13
+ movq %r14, 40(%r15)
+ adcq %r12, %rbx
+ adcq 16(%rsp), %r13 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 48(%rsi), %rdx
+ leaq 248(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 320(%rsp), %r8
+ movq 312(%rsp), %rcx
+ movq 304(%rsp), %rdx
+ movq 296(%rsp), %rsi
+ movq 288(%rsp), %rdi
+ movq 280(%rsp), %rbp
+ addq 248(%rsp), %rbx
+ movq 272(%rsp), %rax
+ movq 256(%rsp), %r12
+ movq 264(%rsp), %r14
+ movq %rbx, 48(%r15)
+ adcq %r13, %r12
+ adcq 16(%rsp), %r14 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 56(%rsi), %rdx
+ leaq 168(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 240(%rsp), %r8
+ movq 232(%rsp), %rdx
+ movq 224(%rsp), %rsi
+ movq 216(%rsp), %rdi
+ movq 208(%rsp), %rbx
+ movq 200(%rsp), %rcx
+ addq 168(%rsp), %r12
+ movq 192(%rsp), %r15
+ movq 176(%rsp), %r13
+ movq 184(%rsp), %rbp
+ movq 72(%rsp), %rax # 8-byte Reload
+ movq %r12, 56(%rax)
+ adcq %r14, %r13
+ adcq 16(%rsp), %rbp # 8-byte Folded Reload
+ adcq 24(%rsp), %r15 # 8-byte Folded Reload
+ adcq 32(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, %r12
+ adcq 40(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %r14
+ adcq 48(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 64(%rsi), %rdx
+ leaq 88(%rsp), %rdi
+ callq .LmulPv576x64
+ addq 88(%rsp), %r13
+ adcq 96(%rsp), %rbp
+ movq 160(%rsp), %r8
+ adcq 104(%rsp), %r15
+ movq 152(%rsp), %r9
+ movq 144(%rsp), %rdx
+ movq 136(%rsp), %rsi
+ movq 128(%rsp), %rdi
+ movq 120(%rsp), %rbx
+ movq 112(%rsp), %rax
+ movq 72(%rsp), %rcx # 8-byte Reload
+ movq %r13, 64(%rcx)
+ movq %rbp, 72(%rcx)
+ adcq %r12, %rax
+ movq %r15, 80(%rcx)
+ movq %rax, 88(%rcx)
+ adcq %r14, %rbx
+ movq %rbx, 96(%rcx)
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 104(%rcx)
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 112(%rcx)
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 120(%rcx)
+ adcq 64(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 128(%rcx)
+ adcq $0, %r8
+ movq %r8, 136(%rcx)
+ addq $808, %rsp # imm = 0x328
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end129:
+ .size mcl_fpDbl_sqrPre9Lbmi2, .Lfunc_end129-mcl_fpDbl_sqrPre9Lbmi2
+
+ .globl mcl_fp_mont9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont9Lbmi2,@function
+mcl_fp_mont9Lbmi2: # @mcl_fp_mont9Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1560, %rsp # imm = 0x618
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ movq %rdx, 32(%rsp) # 8-byte Spill
+ movq %rsi, 24(%rsp) # 8-byte Spill
+ movq %rdi, (%rsp) # 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 16(%rsp) # 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1480(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 1480(%rsp), %r14
+ movq 1488(%rsp), %r15
+ movq %r14, %rdx
+ imulq %rbx, %rdx
+ movq 1552(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ movq 1544(%rsp), %rax
+ movq %rax, 104(%rsp) # 8-byte Spill
+ movq 1536(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 1528(%rsp), %r12
+ movq 1520(%rsp), %r13
+ movq 1512(%rsp), %rbx
+ movq 1504(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 1496(%rsp), %rbp
+ leaq 1400(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 1400(%rsp), %r14
+ adcq 1408(%rsp), %r15
+ adcq 1416(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 1424(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ adcq 1432(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ adcq 1440(%rsp), %r13
+ movq %r13, 64(%rsp) # 8-byte Spill
+ adcq 1448(%rsp), %r12
+ movq %r12, 48(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rbx # 8-byte Reload
+ adcq 1456(%rsp), %rbx
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 1464(%rsp), %r14
+ movq 112(%rsp), %r13 # 8-byte Reload
+ adcq 1472(%rsp), %r13
+ sbbq %rbp, %rbp
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1320(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebp
+ addq 1320(%rsp), %r15
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 1328(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 1336(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r12 # 8-byte Reload
+ adcq 1344(%rsp), %r12
+ movq 64(%rsp), %rax # 8-byte Reload
+ adcq 1352(%rsp), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 1360(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ adcq 1368(%rsp), %rbx
+ adcq 1376(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ adcq 1384(%rsp), %r13
+ movq %r13, 112(%rsp) # 8-byte Spill
+ adcq 1392(%rsp), %rbp
+ sbbq %r14, %r14
+ movq %r15, %rdx
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 1240(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq %r14, %rax
+ andl $1, %eax
+ addq 1240(%rsp), %r15
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 1248(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r14 # 8-byte Reload
+ adcq 1256(%rsp), %r14
+ adcq 1264(%rsp), %r12
+ movq %r12, 40(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 1272(%rsp), %r12
+ movq 48(%rsp), %r13 # 8-byte Reload
+ adcq 1280(%rsp), %r13
+ adcq 1288(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r15 # 8-byte Reload
+ adcq 1296(%rsp), %r15
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq 1304(%rsp), %rbx
+ adcq 1312(%rsp), %rbp
+ adcq $0, %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 1160(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 96(%rsp), %rax # 8-byte Reload
+ addq 1160(%rsp), %rax
+ adcq 1168(%rsp), %r14
+ movq %r14, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r14 # 8-byte Reload
+ adcq 1176(%rsp), %r14
+ adcq 1184(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ movq %r13, %r12
+ adcq 1192(%rsp), %r12
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 1200(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ adcq 1208(%rsp), %r15
+ movq %r15, %r13
+ adcq 1216(%rsp), %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ adcq 1224(%rsp), %rbp
+ movq 72(%rsp), %rcx # 8-byte Reload
+ adcq 1232(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 1080(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq %r15, %rax
+ andl $1, %eax
+ addq 1080(%rsp), %rbx
+ movq 80(%rsp), %rcx # 8-byte Reload
+ adcq 1088(%rsp), %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ movq %r14, %r15
+ adcq 1096(%rsp), %r15
+ movq 64(%rsp), %r14 # 8-byte Reload
+ adcq 1104(%rsp), %r14
+ movq %r12, %rbx
+ adcq 1112(%rsp), %rbx
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 1120(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ adcq 1128(%rsp), %r13
+ movq %r13, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %r13 # 8-byte Reload
+ adcq 1136(%rsp), %r13
+ adcq 1144(%rsp), %rbp
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 1152(%rsp), %r12
+ adcq $0, %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 1000(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 80(%rsp), %rax # 8-byte Reload
+ addq 1000(%rsp), %rax
+ adcq 1008(%rsp), %r15
+ movq %r15, 40(%rsp) # 8-byte Spill
+ adcq 1016(%rsp), %r14
+ movq %r14, %r15
+ adcq 1024(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 1032(%rsp), %r14
+ movq 104(%rsp), %rcx # 8-byte Reload
+ adcq 1040(%rsp), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ adcq 1048(%rsp), %r13
+ movq %r13, 112(%rsp) # 8-byte Spill
+ adcq 1056(%rsp), %rbp
+ adcq 1064(%rsp), %r12
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 1072(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 920(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 920(%rsp), %r13
+ movq 40(%rsp), %rcx # 8-byte Reload
+ adcq 928(%rsp), %rcx
+ movq %rcx, 40(%rsp) # 8-byte Spill
+ adcq 936(%rsp), %r15
+ movq %r15, 64(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r15 # 8-byte Reload
+ adcq 944(%rsp), %r15
+ movq %r14, %r13
+ adcq 952(%rsp), %r13
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 960(%rsp), %r14
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq 968(%rsp), %rbx
+ adcq 976(%rsp), %rbp
+ adcq 984(%rsp), %r12
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 992(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 840(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 40(%rsp), %rax # 8-byte Reload
+ addq 840(%rsp), %rax
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 848(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ adcq 856(%rsp), %r15
+ adcq 864(%rsp), %r13
+ movq %r13, 88(%rsp) # 8-byte Spill
+ adcq 872(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ adcq 880(%rsp), %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ adcq 888(%rsp), %rbp
+ adcq 896(%rsp), %r12
+ movq 96(%rsp), %r13 # 8-byte Reload
+ adcq 904(%rsp), %r13
+ movq 80(%rsp), %rcx # 8-byte Reload
+ adcq 912(%rsp), %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 760(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 760(%rsp), %r14
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 768(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ adcq 776(%rsp), %r15
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 784(%rsp), %r14
+ movq 104(%rsp), %rcx # 8-byte Reload
+ adcq 792(%rsp), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rcx # 8-byte Reload
+ adcq 800(%rsp), %rcx
+ movq %rcx, 112(%rsp) # 8-byte Spill
+ adcq 808(%rsp), %rbp
+ movq %r12, %rbx
+ adcq 816(%rsp), %rbx
+ movq %r13, %r12
+ adcq 824(%rsp), %r12
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 832(%rsp), %r13
+ adcq $0, %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 680(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 64(%rsp), %rax # 8-byte Reload
+ addq 680(%rsp), %rax
+ adcq 688(%rsp), %r15
+ movq %r15, 48(%rsp) # 8-byte Spill
+ adcq 696(%rsp), %r14
+ movq %r14, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rcx # 8-byte Reload
+ adcq 704(%rsp), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %r15 # 8-byte Reload
+ adcq 712(%rsp), %r15
+ adcq 720(%rsp), %rbp
+ adcq 728(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 736(%rsp), %r12
+ movq %r12, 96(%rsp) # 8-byte Spill
+ adcq 744(%rsp), %r13
+ movq %r13, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r13 # 8-byte Reload
+ adcq 752(%rsp), %r13
+ sbbq %r14, %r14
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 600(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %r14d
+ addq 600(%rsp), %rbx
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 608(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 616(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rbx # 8-byte Reload
+ adcq 624(%rsp), %rbx
+ adcq 632(%rsp), %r15
+ movq %r15, 112(%rsp) # 8-byte Spill
+ adcq 640(%rsp), %rbp
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 648(%rsp), %r12
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 656(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r15 # 8-byte Reload
+ adcq 664(%rsp), %r15
+ adcq 672(%rsp), %r13
+ adcq $0, %r14
+ movq %r14, 64(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 520(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 48(%rsp), %rax # 8-byte Reload
+ addq 520(%rsp), %rax
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 528(%rsp), %r14
+ adcq 536(%rsp), %rbx
+ movq %rbx, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rcx # 8-byte Reload
+ adcq 544(%rsp), %rcx
+ movq %rcx, 112(%rsp) # 8-byte Spill
+ adcq 552(%rsp), %rbp
+ adcq 560(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq 96(%rsp), %r12 # 8-byte Reload
+ adcq 568(%rsp), %r12
+ adcq 576(%rsp), %r15
+ movq %r15, 80(%rsp) # 8-byte Spill
+ adcq 584(%rsp), %r13
+ movq %r13, 40(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r15 # 8-byte Reload
+ adcq 592(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 440(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 440(%rsp), %r13
+ adcq 448(%rsp), %r14
+ movq %r14, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 456(%rsp), %r14
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq 464(%rsp), %rbx
+ adcq 472(%rsp), %rbp
+ movq %rbp, 8(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rcx # 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ adcq 488(%rsp), %r12
+ movq %r12, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 496(%rsp), %rbp
+ movq 40(%rsp), %r12 # 8-byte Reload
+ adcq 504(%rsp), %r12
+ adcq 512(%rsp), %r15
+ movq %r15, %r13
+ adcq $0, %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 360(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 88(%rsp), %rax # 8-byte Reload
+ addq 360(%rsp), %rax
+ adcq 368(%rsp), %r14
+ adcq 376(%rsp), %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ movq 8(%rsp), %rcx # 8-byte Reload
+ adcq 384(%rsp), %rcx
+ movq %rcx, 8(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 392(%rsp), %rbx
+ movq 96(%rsp), %r15 # 8-byte Reload
+ adcq 400(%rsp), %r15
+ adcq 408(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 416(%rsp), %r12
+ movq %r12, %rbp
+ adcq 424(%rsp), %r13
+ movq %r13, 64(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rcx # 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 280(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %r13d
+ addq 280(%rsp), %r12
+ adcq 288(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rax # 8-byte Reload
+ adcq 296(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ movq 8(%rsp), %r14 # 8-byte Reload
+ adcq 304(%rsp), %r14
+ adcq 312(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 320(%rsp), %r15
+ movq %r15, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbx # 8-byte Reload
+ adcq 328(%rsp), %rbx
+ adcq 336(%rsp), %rbp
+ movq %rbp, 40(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 344(%rsp), %r12
+ movq 48(%rsp), %rbp # 8-byte Reload
+ adcq 352(%rsp), %rbp
+ adcq $0, %r13
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 200(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 104(%rsp), %rax # 8-byte Reload
+ addq 200(%rsp), %rax
+ movq 112(%rsp), %r15 # 8-byte Reload
+ adcq 208(%rsp), %r15
+ adcq 216(%rsp), %r14
+ movq %r14, 8(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r14 # 8-byte Reload
+ adcq 224(%rsp), %r14
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 232(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ adcq 240(%rsp), %rbx
+ movq %rbx, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %rcx # 8-byte Reload
+ adcq 248(%rsp), %rcx
+ movq %rcx, 40(%rsp) # 8-byte Spill
+ adcq 256(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq 264(%rsp), %rbp
+ movq %rbp, 48(%rsp) # 8-byte Spill
+ adcq 272(%rsp), %r13
+ sbbq %rbx, %rbx
+ movq 16(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %r12
+ leaq 120(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebx
+ addq 120(%rsp), %r12
+ adcq 128(%rsp), %r15
+ movq 8(%rsp), %rbp # 8-byte Reload
+ adcq 136(%rsp), %rbp
+ movq %r14, %rcx
+ adcq 144(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ movq 96(%rsp), %r8 # 8-byte Reload
+ adcq 152(%rsp), %r8
+ movq %r8, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r9 # 8-byte Reload
+ adcq 160(%rsp), %r9
+ movq %r9, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r10 # 8-byte Reload
+ adcq 168(%rsp), %r10
+ movq %r10, 40(%rsp) # 8-byte Spill
+ movq 64(%rsp), %rdi # 8-byte Reload
+ adcq 176(%rsp), %rdi
+ movq %rdi, 64(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r14 # 8-byte Reload
+ adcq 184(%rsp), %r14
+ adcq 192(%rsp), %r13
+ adcq $0, %rbx
+ movq %r15, %rsi
+ movq %r15, %r12
+ movq 56(%rsp), %rdx # 8-byte Reload
+ subq (%rdx), %rsi
+ movq %rbp, %rax
+ movq %rbp, %r15
+ sbbq 8(%rdx), %rax
+ movq %rcx, %rbp
+ sbbq 16(%rdx), %rbp
+ movq %r8, %rcx
+ sbbq 24(%rdx), %rcx
+ movq %r9, %r8
+ sbbq 32(%rdx), %r8
+ movq %r10, %r11
+ sbbq 40(%rdx), %r11
+ movq %rdi, %r10
+ sbbq 48(%rdx), %r10
+ movq %r14, %rdi
+ sbbq 56(%rdx), %rdi
+ movq %r13, %r9
+ sbbq 64(%rdx), %r9
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %r13, %r9
+ testb %bl, %bl
+ cmovneq %r12, %rsi
+ movq (%rsp), %rbx # 8-byte Reload
+ movq %rsi, (%rbx)
+ cmovneq %r15, %rax
+ movq %rax, 8(%rbx)
+ cmovneq 72(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rbx)
+ cmovneq 96(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 24(%rbx)
+ cmovneq 80(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, 32(%rbx)
+ cmovneq 40(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 40(%rbx)
+ cmovneq 64(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 48(%rbx)
+ cmovneq %r14, %rdi
+ movq %rdi, 56(%rbx)
+ movq %r9, 64(%rbx)
+ addq $1560, %rsp # imm = 0x618
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end130:
+ .size mcl_fp_mont9Lbmi2, .Lfunc_end130-mcl_fp_mont9Lbmi2
+
+ .globl mcl_fp_montNF9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF9Lbmi2,@function
+mcl_fp_montNF9Lbmi2: # @mcl_fp_montNF9Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1560, %rsp # imm = 0x618
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ movq %rdx, 16(%rsp) # 8-byte Spill
+ movq %rsi, 24(%rsp) # 8-byte Spill
+ movq %rdi, (%rsp) # 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 32(%rsp) # 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1480(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 1480(%rsp), %r12
+ movq 1488(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq %r12, %rdx
+ imulq %rbx, %rdx
+ movq 1552(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ movq 1544(%rsp), %r13
+ movq 1536(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 1528(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 1520(%rsp), %r14
+ movq 1512(%rsp), %r15
+ movq 1504(%rsp), %rbx
+ movq 1496(%rsp), %rbp
+ leaq 1400(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 1400(%rsp), %r12
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 1408(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ adcq 1416(%rsp), %rbp
+ movq %rbp, 8(%rsp) # 8-byte Spill
+ adcq 1424(%rsp), %rbx
+ movq %rbx, 104(%rsp) # 8-byte Spill
+ adcq 1432(%rsp), %r15
+ movq %r15, 56(%rsp) # 8-byte Spill
+ adcq 1440(%rsp), %r14
+ movq %r14, 40(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rbx # 8-byte Reload
+ adcq 1448(%rsp), %rbx
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 1456(%rsp), %r12
+ adcq 1464(%rsp), %r13
+ movq %r13, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rbp # 8-byte Reload
+ adcq 1472(%rsp), %rbp
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1320(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 1392(%rsp), %rax
+ movq 88(%rsp), %rcx # 8-byte Reload
+ addq 1320(%rsp), %rcx
+ movq 8(%rsp), %r15 # 8-byte Reload
+ adcq 1328(%rsp), %r15
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 1336(%rsp), %r14
+ movq 56(%rsp), %rdx # 8-byte Reload
+ adcq 1344(%rsp), %rdx
+ movq %rdx, 56(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r13 # 8-byte Reload
+ adcq 1352(%rsp), %r13
+ adcq 1360(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ adcq 1368(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq 96(%rsp), %rdx # 8-byte Reload
+ adcq 1376(%rsp), %rdx
+ movq %rdx, 96(%rsp) # 8-byte Spill
+ adcq 1384(%rsp), %rbp
+ movq %rbp, 112(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, %rbp
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 1240(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 1240(%rsp), %rbx
+ adcq 1248(%rsp), %r15
+ movq %r15, 8(%rsp) # 8-byte Spill
+ adcq 1256(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r12 # 8-byte Reload
+ adcq 1264(%rsp), %r12
+ adcq 1272(%rsp), %r13
+ movq %r13, %r14
+ movq 48(%rsp), %r13 # 8-byte Reload
+ adcq 1280(%rsp), %r13
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 1288(%rsp), %rbx
+ movq 96(%rsp), %r15 # 8-byte Reload
+ adcq 1296(%rsp), %r15
+ movq 112(%rsp), %rax # 8-byte Reload
+ adcq 1304(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ adcq 1312(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 1160(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 1232(%rsp), %rax
+ movq 8(%rsp), %rcx # 8-byte Reload
+ addq 1160(%rsp), %rcx
+ movq 104(%rsp), %rbp # 8-byte Reload
+ adcq 1168(%rsp), %rbp
+ adcq 1176(%rsp), %r12
+ movq %r12, 56(%rsp) # 8-byte Spill
+ adcq 1184(%rsp), %r14
+ adcq 1192(%rsp), %r13
+ movq %r13, %r12
+ adcq 1200(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 1208(%rsp), %r15
+ movq %r15, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq 1216(%rsp), %rbx
+ movq 80(%rsp), %rdx # 8-byte Reload
+ adcq 1224(%rsp), %rdx
+ movq %rdx, 80(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ adcq $0, %r15
+ movq %rcx, %rdx
+ movq %rcx, %r13
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 1080(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 1080(%rsp), %r13
+ adcq 1088(%rsp), %rbp
+ movq %rbp, 104(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r13 # 8-byte Reload
+ adcq 1096(%rsp), %r13
+ adcq 1104(%rsp), %r14
+ adcq 1112(%rsp), %r12
+ movq %r12, 48(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 1120(%rsp), %r12
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 1128(%rsp), %rbp
+ adcq 1136(%rsp), %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbx # 8-byte Reload
+ adcq 1144(%rsp), %rbx
+ adcq 1152(%rsp), %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 1000(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 1072(%rsp), %rax
+ movq 104(%rsp), %rcx # 8-byte Reload
+ addq 1000(%rsp), %rcx
+ adcq 1008(%rsp), %r13
+ movq %r13, 56(%rsp) # 8-byte Spill
+ adcq 1016(%rsp), %r14
+ movq %r14, 40(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r14 # 8-byte Reload
+ adcq 1024(%rsp), %r14
+ adcq 1032(%rsp), %r12
+ adcq 1040(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %r13 # 8-byte Reload
+ adcq 1048(%rsp), %r13
+ adcq 1056(%rsp), %rbx
+ movq %rbx, 80(%rsp) # 8-byte Spill
+ adcq 1064(%rsp), %r15
+ movq %r15, 88(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, 104(%rsp) # 8-byte Spill
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 920(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 920(%rsp), %rbx
+ movq 56(%rsp), %rax # 8-byte Reload
+ adcq 928(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 40(%rsp), %rbp # 8-byte Reload
+ adcq 936(%rsp), %rbp
+ movq %r14, %rbx
+ adcq 944(%rsp), %rbx
+ adcq 952(%rsp), %r12
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 960(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ adcq 968(%rsp), %r13
+ movq %r13, %r15
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 976(%rsp), %r13
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 984(%rsp), %r14
+ movq 104(%rsp), %rax # 8-byte Reload
+ adcq 992(%rsp), %rax
+ movq %rax, 104(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 840(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 912(%rsp), %rax
+ movq 56(%rsp), %rcx # 8-byte Reload
+ addq 840(%rsp), %rcx
+ adcq 848(%rsp), %rbp
+ movq %rbp, 40(%rsp) # 8-byte Spill
+ adcq 856(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ adcq 864(%rsp), %r12
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 872(%rsp), %rbp
+ adcq 880(%rsp), %r15
+ movq %r15, 112(%rsp) # 8-byte Spill
+ adcq 888(%rsp), %r13
+ adcq 896(%rsp), %r14
+ movq %r14, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rdx # 8-byte Reload
+ adcq 904(%rsp), %rdx
+ movq %rdx, 104(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, %r14
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 760(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 760(%rsp), %rbx
+ movq 40(%rsp), %rax # 8-byte Reload
+ adcq 768(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r15 # 8-byte Reload
+ adcq 776(%rsp), %r15
+ adcq 784(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq %rbp, %rbx
+ adcq 792(%rsp), %rbx
+ movq 112(%rsp), %rbp # 8-byte Reload
+ adcq 800(%rsp), %rbp
+ adcq 808(%rsp), %r13
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 816(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r12 # 8-byte Reload
+ adcq 824(%rsp), %r12
+ adcq 832(%rsp), %r14
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 680(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 752(%rsp), %rcx
+ movq 40(%rsp), %rax # 8-byte Reload
+ addq 680(%rsp), %rax
+ adcq 688(%rsp), %r15
+ movq %r15, 48(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rdx # 8-byte Reload
+ adcq 696(%rsp), %rdx
+ movq %rdx, 72(%rsp) # 8-byte Spill
+ adcq 704(%rsp), %rbx
+ movq %rbx, 96(%rsp) # 8-byte Spill
+ adcq 712(%rsp), %rbp
+ movq %rbp, 112(%rsp) # 8-byte Spill
+ adcq 720(%rsp), %r13
+ movq %r13, %r15
+ movq 88(%rsp), %rbx # 8-byte Reload
+ adcq 728(%rsp), %rbx
+ adcq 736(%rsp), %r12
+ movq %r12, 104(%rsp) # 8-byte Spill
+ adcq 744(%rsp), %r14
+ movq %r14, 40(%rsp) # 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 600(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 600(%rsp), %r13
+ movq 48(%rsp), %r13 # 8-byte Reload
+ adcq 608(%rsp), %r13
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 616(%rsp), %r12
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 624(%rsp), %rbp
+ movq 112(%rsp), %rax # 8-byte Reload
+ adcq 632(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ adcq 640(%rsp), %r15
+ movq %r15, 80(%rsp) # 8-byte Spill
+ adcq 648(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 656(%rsp), %r14
+ movq 40(%rsp), %rbx # 8-byte Reload
+ adcq 664(%rsp), %rbx
+ movq 56(%rsp), %r15 # 8-byte Reload
+ adcq 672(%rsp), %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 520(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 592(%rsp), %rcx
+ movq %r13, %rax
+ addq 520(%rsp), %rax
+ adcq 528(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq %rbp, %r12
+ adcq 536(%rsp), %r12
+ movq 112(%rsp), %rbp # 8-byte Reload
+ adcq 544(%rsp), %rbp
+ movq 80(%rsp), %rdx # 8-byte Reload
+ adcq 552(%rsp), %rdx
+ movq %rdx, 80(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rdx # 8-byte Reload
+ adcq 560(%rsp), %rdx
+ movq %rdx, 88(%rsp) # 8-byte Spill
+ adcq 568(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ adcq 576(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ adcq 584(%rsp), %r15
+ movq %r15, 56(%rsp) # 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, %r13
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 440(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 440(%rsp), %r14
+ movq 72(%rsp), %rax # 8-byte Reload
+ adcq 448(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ adcq 456(%rsp), %r12
+ adcq 464(%rsp), %rbp
+ movq %rbp, 112(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r14 # 8-byte Reload
+ adcq 472(%rsp), %r14
+ movq 88(%rsp), %r15 # 8-byte Reload
+ adcq 480(%rsp), %r15
+ movq 104(%rsp), %rbp # 8-byte Reload
+ adcq 488(%rsp), %rbp
+ movq 40(%rsp), %rbx # 8-byte Reload
+ adcq 496(%rsp), %rbx
+ movq 56(%rsp), %rax # 8-byte Reload
+ adcq 504(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ adcq 512(%rsp), %r13
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 360(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 432(%rsp), %rcx
+ movq 72(%rsp), %rax # 8-byte Reload
+ addq 360(%rsp), %rax
+ adcq 368(%rsp), %r12
+ movq %r12, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rdx # 8-byte Reload
+ adcq 376(%rsp), %rdx
+ movq %rdx, 112(%rsp) # 8-byte Spill
+ adcq 384(%rsp), %r14
+ movq %r14, 80(%rsp) # 8-byte Spill
+ adcq 392(%rsp), %r15
+ movq %r15, 88(%rsp) # 8-byte Spill
+ adcq 400(%rsp), %rbp
+ movq %rbp, 104(%rsp) # 8-byte Spill
+ adcq 408(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r14 # 8-byte Reload
+ adcq 416(%rsp), %r14
+ adcq 424(%rsp), %r13
+ movq %r13, %r15
+ adcq $0, %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 280(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 280(%rsp), %r12
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 288(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rbp # 8-byte Reload
+ adcq 296(%rsp), %rbp
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r13 # 8-byte Reload
+ adcq 312(%rsp), %r13
+ movq 104(%rsp), %r12 # 8-byte Reload
+ adcq 320(%rsp), %r12
+ movq 40(%rsp), %rbx # 8-byte Reload
+ adcq 328(%rsp), %rbx
+ adcq 336(%rsp), %r14
+ movq %r14, 56(%rsp) # 8-byte Spill
+ adcq 344(%rsp), %r15
+ movq %r15, 48(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r14 # 8-byte Reload
+ adcq 352(%rsp), %r14
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 200(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 272(%rsp), %rcx
+ movq 96(%rsp), %rax # 8-byte Reload
+ addq 200(%rsp), %rax
+ adcq 208(%rsp), %rbp
+ movq %rbp, 112(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 216(%rsp), %rbp
+ adcq 224(%rsp), %r13
+ movq %r13, 88(%rsp) # 8-byte Spill
+ adcq 232(%rsp), %r12
+ movq %r12, 104(%rsp) # 8-byte Spill
+ adcq 240(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r15 # 8-byte Reload
+ adcq 248(%rsp), %r15
+ movq 48(%rsp), %r12 # 8-byte Reload
+ adcq 256(%rsp), %r12
+ adcq 264(%rsp), %r14
+ adcq $0, %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbx
+ leaq 120(%rsp), %rdi
+ movq 64(%rsp), %r13 # 8-byte Reload
+ movq %r13, %rsi
+ callq .LmulPv576x64
+ addq 120(%rsp), %rbx
+ movq 112(%rsp), %rcx # 8-byte Reload
+ adcq 128(%rsp), %rcx
+ movq %rbp, %rdx
+ adcq 136(%rsp), %rdx
+ movq 88(%rsp), %rsi # 8-byte Reload
+ adcq 144(%rsp), %rsi
+ movq %rsi, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rdi # 8-byte Reload
+ adcq 152(%rsp), %rdi
+ movq %rdi, 104(%rsp) # 8-byte Spill
+ movq 40(%rsp), %rbx # 8-byte Reload
+ adcq 160(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ movq %r15, %r8
+ adcq 168(%rsp), %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq %r12, %r15
+ adcq 176(%rsp), %r15
+ adcq 184(%rsp), %r14
+ movq 96(%rsp), %r9 # 8-byte Reload
+ adcq 192(%rsp), %r9
+ movq %rcx, %rax
+ movq %rcx, %r11
+ movq %r13, %rbp
+ subq (%rbp), %rax
+ movq %rdx, %rcx
+ movq %rdx, %r12
+ sbbq 8(%rbp), %rcx
+ movq %rsi, %rdx
+ sbbq 16(%rbp), %rdx
+ movq %rdi, %rsi
+ sbbq 24(%rbp), %rsi
+ movq %rbx, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %r8, %r10
+ sbbq 40(%rbp), %r10
+ movq %r15, %r13
+ sbbq 48(%rbp), %r13
+ movq %r14, %r8
+ sbbq 56(%rbp), %r8
+ movq %rbp, %rbx
+ movq %r9, %rbp
+ sbbq 64(%rbx), %rbp
+ movq %rbp, %rbx
+ sarq $63, %rbx
+ cmovsq %r11, %rax
+ movq (%rsp), %rbx # 8-byte Reload
+ movq %rax, (%rbx)
+ cmovsq %r12, %rcx
+ movq %rcx, 8(%rbx)
+ cmovsq 88(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 16(%rbx)
+ cmovsq 104(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovsq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rbx)
+ cmovsq 56(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 40(%rbx)
+ cmovsq %r15, %r13
+ movq %r13, 48(%rbx)
+ cmovsq %r14, %r8
+ movq %r8, 56(%rbx)
+ cmovsq %r9, %rbp
+ movq %rbp, 64(%rbx)
+ addq $1560, %rsp # imm = 0x618
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end131:
+ .size mcl_fp_montNF9Lbmi2, .Lfunc_end131-mcl_fp_montNF9Lbmi2
+
+ .globl mcl_fp_montRed9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed9Lbmi2,@function
+mcl_fp_montRed9Lbmi2: # @mcl_fp_montRed9Lbmi2
+# BB#0:
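+# A sketch of the intent, assuming mcl's argument order (z=%rdi, xy=%rsi, p=%rdx) and
+# n' = -p^-1 mod 2^64 stored at p[-1]: nine reduction rounds each form
+# m = (lowest live limb) * n' mod 2^64, call .LmulPv576x64 to compute m*p, and fold the
+# product in so that limb cancels; a final conditional subtraction of p yields the
+# 9-limb Montgomery-reduced result.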
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $936, %rsp # imm = 0x3A8
+ movq %rdx, %rax
+ movq %rax, 128(%rsp) # 8-byte Spill
+ movq %rdi, 80(%rsp) # 8-byte Spill
+ movq -8(%rax), %rcx
+ movq %rcx, 120(%rsp) # 8-byte Spill
+ movq (%rsi), %r14
+ movq 8(%rsi), %rdx
+ movq %rdx, 192(%rsp) # 8-byte Spill
+ movq %r14, %rdx
+ imulq %rcx, %rdx
+ movq 136(%rsi), %rcx
+ movq %rcx, 112(%rsp) # 8-byte Spill
+ movq 128(%rsi), %rcx
+ movq %rcx, 152(%rsp) # 8-byte Spill
+ movq 120(%rsi), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ movq 112(%rsi), %rcx
+ movq %rcx, 144(%rsp) # 8-byte Spill
+ movq 104(%rsi), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ movq 96(%rsi), %rcx
+ movq %rcx, 208(%rsp) # 8-byte Spill
+ movq 88(%rsi), %rcx
+ movq %rcx, 200(%rsp) # 8-byte Spill
+ movq 80(%rsi), %rcx
+ movq %rcx, 160(%rsp) # 8-byte Spill
+ movq 72(%rsi), %r12
+ movq 64(%rsi), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 56(%rsi), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ movq 48(%rsi), %rcx
+ movq %rcx, 136(%rsp) # 8-byte Spill
+ movq 40(%rsi), %rbp
+ movq 32(%rsi), %rbx
+ movq 24(%rsi), %r13
+ movq 16(%rsi), %r15
+ movq %rax, %rcx
+ movq (%rcx), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ movq 64(%rcx), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 56(%rcx), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 48(%rcx), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq %rcx, %rsi
+ leaq 856(%rsp), %rdi
+ callq .LmulPv576x64
+ addq 856(%rsp), %r14
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 864(%rsp), %rcx
+ adcq 872(%rsp), %r15
+ adcq 880(%rsp), %r13
+ adcq 888(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ adcq 896(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq 136(%rsp), %rax # 8-byte Reload
+ adcq 904(%rsp), %rax
+ movq %rax, 136(%rsp) # 8-byte Spill
+ movq 168(%rsp), %rax # 8-byte Reload
+ adcq 912(%rsp), %rax
+ movq %rax, 168(%rsp) # 8-byte Spill
+ movq 176(%rsp), %rax # 8-byte Reload
+ adcq 920(%rsp), %rax
+ movq %rax, 176(%rsp) # 8-byte Spill
+ adcq 928(%rsp), %r12
+ movq %r12, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ adcq $0, 200(%rsp) # 8-byte Folded Spill
+ adcq $0, 208(%rsp) # 8-byte Folded Spill
+ adcq $0, 184(%rsp) # 8-byte Folded Spill
+ adcq $0, 144(%rsp) # 8-byte Folded Spill
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ adcq $0, 152(%rsp) # 8-byte Folded Spill
+ movq 112(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ sbbq %r12, %r12
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 776(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %r12d
+ addq 776(%rsp), %rbx
+ adcq 784(%rsp), %r15
+ adcq 792(%rsp), %r13
+ movq %r13, (%rsp) # 8-byte Spill
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 800(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 808(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 136(%rsp), %rax # 8-byte Reload
+ adcq 816(%rsp), %rax
+ movq %rax, 136(%rsp) # 8-byte Spill
+ movq 168(%rsp), %rax # 8-byte Reload
+ adcq 824(%rsp), %rax
+ movq %rax, 168(%rsp) # 8-byte Spill
+ movq 176(%rsp), %rax # 8-byte Reload
+ adcq 832(%rsp), %rax
+ movq %rax, 176(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rax # 8-byte Reload
+ adcq 840(%rsp), %rax
+ movq %rax, 192(%rsp) # 8-byte Spill
+ adcq 848(%rsp), %rbp
+ movq %rbp, 160(%rsp) # 8-byte Spill
+ movq 200(%rsp), %r13 # 8-byte Reload
+ adcq $0, %r13
+ adcq $0, 208(%rsp) # 8-byte Folded Spill
+ adcq $0, 184(%rsp) # 8-byte Folded Spill
+ adcq $0, 144(%rsp) # 8-byte Folded Spill
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ movq 152(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, %r14
+ movq %r14, 112(%rsp) # 8-byte Spill
+ adcq $0, %r12
+ movq %r15, %rdx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 696(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 696(%rsp), %r15
+ movq (%rsp), %rcx # 8-byte Reload
+ adcq 704(%rsp), %rcx
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 712(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 720(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 136(%rsp), %rbp # 8-byte Reload
+ adcq 728(%rsp), %rbp
+ movq 168(%rsp), %r14 # 8-byte Reload
+ adcq 736(%rsp), %r14
+ movq 176(%rsp), %r15 # 8-byte Reload
+ adcq 744(%rsp), %r15
+ movq 192(%rsp), %rax # 8-byte Reload
+ adcq 752(%rsp), %rax
+ movq %rax, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rax # 8-byte Reload
+ adcq 760(%rsp), %rax
+ movq %rax, 160(%rsp) # 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 200(%rsp) # 8-byte Spill
+ adcq $0, 208(%rsp) # 8-byte Folded Spill
+ movq 184(%rsp), %r13 # 8-byte Reload
+ adcq $0, %r13
+ adcq $0, 144(%rsp) # 8-byte Folded Spill
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 152(%rsp) # 8-byte Spill
+ adcq $0, 112(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rcx, %rbx
+ movq %rbx, %rdx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 616(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 616(%rsp), %rbx
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 624(%rsp), %rax
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 632(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ adcq 640(%rsp), %rbp
+ movq %rbp, 136(%rsp) # 8-byte Spill
+ adcq 648(%rsp), %r14
+ movq %r14, 168(%rsp) # 8-byte Spill
+ adcq 656(%rsp), %r15
+ movq 192(%rsp), %r14 # 8-byte Reload
+ adcq 664(%rsp), %r14
+ movq 160(%rsp), %rbp # 8-byte Reload
+ adcq 672(%rsp), %rbp
+ movq 200(%rsp), %rcx # 8-byte Reload
+ adcq 680(%rsp), %rcx
+ movq %rcx, 200(%rsp) # 8-byte Spill
+ movq 208(%rsp), %rcx # 8-byte Reload
+ adcq 688(%rsp), %rcx
+ movq %rcx, 208(%rsp) # 8-byte Spill
+ adcq $0, %r13
+ movq %r13, 184(%rsp) # 8-byte Spill
+ adcq $0, 144(%rsp) # 8-byte Folded Spill
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ adcq $0, 152(%rsp) # 8-byte Folded Spill
+ adcq $0, 112(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 536(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 536(%rsp), %rbx
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 544(%rsp), %rax
+ movq 136(%rsp), %rcx # 8-byte Reload
+ adcq 552(%rsp), %rcx
+ movq %rcx, 136(%rsp) # 8-byte Spill
+ movq 168(%rsp), %rcx # 8-byte Reload
+ adcq 560(%rsp), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ adcq 568(%rsp), %r15
+ movq %r15, 176(%rsp) # 8-byte Spill
+ adcq 576(%rsp), %r14
+ movq %r14, 192(%rsp) # 8-byte Spill
+ adcq 584(%rsp), %rbp
+ movq %rbp, 160(%rsp) # 8-byte Spill
+ movq 200(%rsp), %r13 # 8-byte Reload
+ adcq 592(%rsp), %r13
+ movq 208(%rsp), %r15 # 8-byte Reload
+ adcq 600(%rsp), %r15
+ movq 184(%rsp), %rbp # 8-byte Reload
+ adcq 608(%rsp), %rbp
+ movq 144(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ adcq $0, 152(%rsp) # 8-byte Folded Spill
+ adcq $0, 112(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 456(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 456(%rsp), %r14
+ movq 136(%rsp), %rax # 8-byte Reload
+ adcq 464(%rsp), %rax
+ movq 168(%rsp), %rcx # 8-byte Reload
+ adcq 472(%rsp), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ movq 176(%rsp), %rcx # 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 488(%rsp), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rcx # 8-byte Reload
+ adcq 496(%rsp), %rcx
+ movq %rcx, 160(%rsp) # 8-byte Spill
+ adcq 504(%rsp), %r13
+ movq %r13, 200(%rsp) # 8-byte Spill
+ adcq 512(%rsp), %r15
+ movq %r15, 208(%rsp) # 8-byte Spill
+ adcq 520(%rsp), %rbp
+ movq %rbp, 184(%rsp) # 8-byte Spill
+ adcq 528(%rsp), %rbx
+ movq %rbx, 144(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ movq 152(%rsp), %r13 # 8-byte Reload
+ adcq $0, %r13
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, %r12
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 376(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 376(%rsp), %r15
+ movq 168(%rsp), %rax # 8-byte Reload
+ adcq 384(%rsp), %rax
+ movq 176(%rsp), %rcx # 8-byte Reload
+ adcq 392(%rsp), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 400(%rsp), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rbp # 8-byte Reload
+ adcq 408(%rsp), %rbp
+ movq 200(%rsp), %rcx # 8-byte Reload
+ adcq 416(%rsp), %rcx
+ movq %rcx, 200(%rsp) # 8-byte Spill
+ movq 208(%rsp), %rcx # 8-byte Reload
+ adcq 424(%rsp), %rcx
+ movq %rcx, 208(%rsp) # 8-byte Spill
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ movq 144(%rsp), %r15 # 8-byte Reload
+ adcq 440(%rsp), %r15
+ adcq 448(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ adcq $0, %r13
+ movq %r13, %r14
+ adcq $0, %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ adcq $0, %r12
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 296(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 296(%rsp), %rbx
+ movq 176(%rsp), %rax # 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq 192(%rsp), %r13 # 8-byte Reload
+ adcq 312(%rsp), %r13
+ adcq 320(%rsp), %rbp
+ movq 200(%rsp), %rcx # 8-byte Reload
+ adcq 328(%rsp), %rcx
+ movq %rcx, 200(%rsp) # 8-byte Spill
+ movq 208(%rsp), %rcx # 8-byte Reload
+ adcq 336(%rsp), %rcx
+ movq %rcx, 208(%rsp) # 8-byte Spill
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 344(%rsp), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ adcq 352(%rsp), %r15
+ movq %r15, 144(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r15 # 8-byte Reload
+ adcq 360(%rsp), %r15
+ adcq 368(%rsp), %r14
+ movq %r14, 152(%rsp) # 8-byte Spill
+ movq 112(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ adcq $0, %r12
+ movq 120(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbx
+ leaq 216(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 216(%rsp), %rbx
+ movq %r13, %rsi
+ adcq 224(%rsp), %rsi
+ movq %rsi, 192(%rsp) # 8-byte Spill
+ adcq 232(%rsp), %rbp
+ movq %rbp, 160(%rsp) # 8-byte Spill
+ movq 200(%rsp), %r9 # 8-byte Reload
+ adcq 240(%rsp), %r9
+ movq %r9, 200(%rsp) # 8-byte Spill
+ movq 208(%rsp), %r8 # 8-byte Reload
+ adcq 248(%rsp), %r8
+ movq %r8, 208(%rsp) # 8-byte Spill
+ movq 184(%rsp), %rbx # 8-byte Reload
+ adcq 256(%rsp), %rbx
+ movq 144(%rsp), %rax # 8-byte Reload
+ adcq 264(%rsp), %rax
+ movq %r15, %rcx
+ adcq 272(%rsp), %rcx
+ movq 152(%rsp), %rdx # 8-byte Reload
+ adcq 280(%rsp), %rdx
+ movq %rdx, 152(%rsp) # 8-byte Spill
+ adcq 288(%rsp), %r14
+ movq %r14, %r11
+ adcq $0, %r12
+ subq 16(%rsp), %rsi # 8-byte Folded Reload
+ movq %rbp, %rdi
+ sbbq 8(%rsp), %rdi # 8-byte Folded Reload
+ movq %r9, %rbp
+ sbbq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %r8, %r13
+ sbbq 32(%rsp), %r13 # 8-byte Folded Reload
+ movq %rbx, %r15
+ sbbq 40(%rsp), %r15 # 8-byte Folded Reload
+ movq %rax, %r14
+ sbbq 48(%rsp), %r14 # 8-byte Folded Reload
+ movq %rcx, %r10
+ sbbq 56(%rsp), %r10 # 8-byte Folded Reload
+ movq %rdx, %r8
+ sbbq 64(%rsp), %r8 # 8-byte Folded Reload
+ movq %r11, %r9
+ sbbq 72(%rsp), %r9 # 8-byte Folded Reload
+ sbbq $0, %r12
+ andl $1, %r12d
+ cmovneq %r11, %r9
+ testb %r12b, %r12b
+ cmovneq 192(%rsp), %rsi # 8-byte Folded Reload
+ movq 80(%rsp), %rdx # 8-byte Reload
+ movq %rsi, (%rdx)
+ cmovneq 160(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 8(%rdx)
+ cmovneq 200(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rdx)
+ cmovneq 208(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, 24(%rdx)
+ cmovneq %rbx, %r15
+ movq %r15, 32(%rdx)
+ cmovneq %rax, %r14
+ movq %r14, 40(%rdx)
+ cmovneq %rcx, %r10
+ movq %r10, 48(%rdx)
+ cmovneq 152(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, 56(%rdx)
+ movq %r9, 64(%rdx)
+ addq $936, %rsp # imm = 0x3A8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end132:
+ .size mcl_fp_montRed9Lbmi2, .Lfunc_end132-mcl_fp_montRed9Lbmi2
+
+ .globl mcl_fp_addPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre9Lbmi2,@function
+mcl_fp_addPre9Lbmi2: # @mcl_fp_addPre9Lbmi2
+# BB#0:
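+# 9-limb (576-bit) addition with no reduction: the limbwise sum of the two inputs is
+# stored to the output, and the final carry is returned in %eax.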
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r8
+ movq 64(%rsi), %r15
+ movq 56(%rsi), %r9
+ movq 48(%rsi), %r10
+ movq 40(%rsi), %r11
+ movq 24(%rsi), %r12
+ movq 32(%rsi), %r14
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rcx
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rcx
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r12
+ movq 56(%rdx), %r13
+ movq 48(%rdx), %rsi
+ movq 40(%rdx), %rbp
+ movq 32(%rdx), %rdx
+ movq %rbx, (%rdi)
+ movq %rcx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r12, 24(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 32(%rdi)
+ adcq %r11, %rbp
+ movq %rbp, 40(%rdi)
+ adcq %r10, %rsi
+ movq %rsi, 48(%rdi)
+ adcq %r9, %r13
+ movq %r13, 56(%rdi)
+ adcq %r8, %r15
+ movq %r15, 64(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end133:
+ .size mcl_fp_addPre9Lbmi2, .Lfunc_end133-mcl_fp_addPre9Lbmi2
+
+ .globl mcl_fp_subPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre9Lbmi2,@function
+mcl_fp_subPre9Lbmi2: # @mcl_fp_subPre9Lbmi2
+# BB#0:
+ movq 32(%rdx), %r8
+ movq (%rsi), %rcx
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ movq %rcx, (%rdi)
+ movq 8(%rsi), %rcx
+ sbbq 8(%rdx), %rcx
+ movq %rcx, 8(%rdi)
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq %rcx, 16(%rdi)
+ movq 24(%rsi), %rcx
+ sbbq 24(%rdx), %rcx
+ movq %rcx, 24(%rdi)
+ movq 32(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 40(%rdx), %r8
+ movq %rcx, 32(%rdi)
+ movq 40(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 48(%rdx), %r8
+ movq %rcx, 40(%rdi)
+ movq 48(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 56(%rdx), %r8
+ movq %rcx, 48(%rdi)
+ movq 56(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq %rcx, 56(%rdi)
+ movq 64(%rdx), %rcx
+ movq 64(%rsi), %rdx
+ sbbq %rcx, %rdx
+ movq %rdx, 64(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end134:
+ .size mcl_fp_subPre9Lbmi2, .Lfunc_end134-mcl_fp_subPre9Lbmi2
+
+ .globl mcl_fp_shr1_9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_9Lbmi2,@function
+mcl_fp_shr1_9Lbmi2: # @mcl_fp_shr1_9Lbmi2
+# BB#0:
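+# Right-shifts a 9-limb value by one bit: each shrdq pulls the low bit of the
+# next-higher limb into the limb below, and the top limb gets a plain shrq.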
+ pushq %rbx
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %r9
+ movq 48(%rsi), %r10
+ movq 40(%rsi), %r11
+ movq 32(%rsi), %rcx
+ movq 24(%rsi), %rdx
+ movq 16(%rsi), %rax
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rbx
+ movq %rbx, (%rdi)
+ shrdq $1, %rax, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rdx, %rax
+ movq %rax, 16(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 24(%rdi)
+ shrdq $1, %r11, %rcx
+ movq %rcx, 32(%rdi)
+ shrdq $1, %r10, %r11
+ movq %r11, 40(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 48(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 56(%rdi)
+ shrq %r8
+ movq %r8, 64(%rdi)
+ popq %rbx
+ retq
+.Lfunc_end135:
+ .size mcl_fp_shr1_9Lbmi2, .Lfunc_end135-mcl_fp_shr1_9Lbmi2
+
+ .globl mcl_fp_add9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add9Lbmi2,@function
+mcl_fp_add9Lbmi2: # @mcl_fp_add9Lbmi2
+# BB#0:
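+# 9-limb modular addition: the unreduced sum is stored first, p is then subtracted in
+# scratch registers, and if that subtraction does not borrow the reduced limbs are
+# written over the unreduced ones (the jump to .LBB136_2 is the kept-unreduced path).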
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r12
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %r13
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r10
+ movq 24(%rsi), %r14
+ movq 32(%rsi), %r11
+ movq (%rdx), %rbx
+ movq 8(%rdx), %r15
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %r15
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r14
+ adcq 32(%rdx), %r11
+ adcq 40(%rdx), %r10
+ movq 56(%rdx), %rsi
+ adcq 48(%rdx), %r9
+ movq %rbx, (%rdi)
+ movq %r15, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r14, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ adcq %r13, %rsi
+ movq %rsi, 56(%rdi)
+ adcq %r12, %r8
+ movq %r8, 64(%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %rbx
+ sbbq 8(%rcx), %r15
+ sbbq 16(%rcx), %rax
+ sbbq 24(%rcx), %r14
+ sbbq 32(%rcx), %r11
+ sbbq 40(%rcx), %r10
+ sbbq 48(%rcx), %r9
+ sbbq 56(%rcx), %rsi
+ sbbq 64(%rcx), %r8
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne .LBB136_2
+# BB#1: # %nocarry
+ movq %rbx, (%rdi)
+ movq %r15, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r14, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %rsi, 56(%rdi)
+ movq %r8, 64(%rdi)
+.LBB136_2: # %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end136:
+ .size mcl_fp_add9Lbmi2, .Lfunc_end136-mcl_fp_add9Lbmi2
+
+ .globl mcl_fp_addNF9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF9Lbmi2,@function
+mcl_fp_addNF9Lbmi2: # @mcl_fp_addNF9Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, %r8
+ movq 64(%rdx), %r10
+ movq 56(%rdx), %r11
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rax
+ movq 32(%rdx), %rdi
+ movq 24(%rdx), %rbp
+ movq 16(%rdx), %r15
+ movq (%rdx), %rbx
+ movq 8(%rdx), %r13
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %r13
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %rbp
+ movq %rbp, -40(%rsp) # 8-byte Spill
+ adcq 32(%rsi), %rdi
+ movq %rdi, -16(%rsp) # 8-byte Spill
+ adcq 40(%rsi), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ adcq 48(%rsi), %r9
+ movq %r9, -32(%rsp) # 8-byte Spill
+ movq %r9, %rdi
+ adcq 56(%rsi), %r11
+ movq %r11, -24(%rsp) # 8-byte Spill
+ movq %r11, %rax
+ adcq 64(%rsi), %r10
+ movq %r10, %r9
+ movq %rbx, %rsi
+ subq (%rcx), %rsi
+ movq %r13, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r15, %r12
+ sbbq 16(%rcx), %r12
+ sbbq 24(%rcx), %rbp
+ movq -16(%rsp), %r14 # 8-byte Reload
+ sbbq 32(%rcx), %r14
+ movq -8(%rsp), %r11 # 8-byte Reload
+ sbbq 40(%rcx), %r11
+ movq %rdi, %r10
+ sbbq 48(%rcx), %r10
+ movq %rax, %rdi
+ sbbq 56(%rcx), %rdi
+ movq %r9, %rax
+ sbbq 64(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %rbx, %rsi
+ movq %rsi, (%r8)
+ cmovsq %r13, %rdx
+ movq %rdx, 8(%r8)
+ cmovsq %r15, %r12
+ movq %r12, 16(%r8)
+ cmovsq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%r8)
+ cmovsq -16(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, 32(%r8)
+ cmovsq -8(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 40(%r8)
+ cmovsq -32(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 48(%r8)
+ cmovsq -24(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 56(%r8)
+ cmovsq %r9, %rax
+ movq %rax, 64(%r8)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end137:
+ .size mcl_fp_addNF9Lbmi2, .Lfunc_end137-mcl_fp_addNF9Lbmi2
+
+ .globl mcl_fp_sub9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub9Lbmi2,@function
+mcl_fp_sub9Lbmi2: # @mcl_fp_sub9Lbmi2
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r13
+ movq (%rsi), %rax
+ movq 8(%rsi), %r9
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r9
+ movq 16(%rsi), %r10
+ sbbq 16(%rdx), %r10
+ movq 24(%rsi), %r11
+ sbbq 24(%rdx), %r11
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 40(%rsi), %r14
+ sbbq 40(%rdx), %r14
+ movq 48(%rsi), %r15
+ sbbq 48(%rdx), %r15
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %rsi
+ sbbq 56(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r12, 32(%rdi)
+ movq %r14, 40(%rdi)
+ movq %r15, 48(%rdi)
+ movq %rsi, 56(%rdi)
+ sbbq %r13, %r8
+ movq %r8, 64(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB138_2
+# BB#1: # %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ movq 8(%rcx), %rax
+ adcq %r9, %rax
+ movq %rax, 8(%rdi)
+ movq 16(%rcx), %rax
+ adcq %r10, %rax
+ movq %rax, 16(%rdi)
+ movq 24(%rcx), %rax
+ adcq %r11, %rax
+ movq %rax, 24(%rdi)
+ movq 32(%rcx), %rax
+ adcq %r12, %rax
+ movq %rax, 32(%rdi)
+ movq 40(%rcx), %rax
+ adcq %r14, %rax
+ movq %rax, 40(%rdi)
+ movq 48(%rcx), %rax
+ adcq %r15, %rax
+ movq %rax, 48(%rdi)
+ movq 56(%rcx), %rax
+ adcq %rsi, %rax
+ movq %rax, 56(%rdi)
+ movq 64(%rcx), %rax
+ adcq %r8, %rax
+ movq %rax, 64(%rdi)
+.LBB138_2: # %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end138:
+ .size mcl_fp_sub9Lbmi2, .Lfunc_end138-mcl_fp_sub9Lbmi2
+
+ .globl mcl_fp_subNF9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF9Lbmi2,@function
+mcl_fp_subNF9Lbmi2: # @mcl_fp_subNF9Lbmi2
+# BB#0:
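+# Branch-free 9-limb modular subtraction: after the limbwise subtract, the sign of the
+# top limb is expanded into an all-ones-or-zero mask, ANDed with the limbs of p, and
+# added back, so p is added exactly when the raw difference went negative.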
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq %rdi, %r10
+ movq 64(%rsi), %r14
+ movq 56(%rsi), %rax
+ movq 48(%rsi), %rcx
+ movq 40(%rsi), %rdi
+ movq 32(%rsi), %rbp
+ movq 24(%rsi), %rbx
+ movq 16(%rsi), %r15
+ movq (%rsi), %r13
+ movq 8(%rsi), %r12
+ subq (%rdx), %r13
+ sbbq 8(%rdx), %r12
+ sbbq 16(%rdx), %r15
+ sbbq 24(%rdx), %rbx
+ movq %rbx, -40(%rsp) # 8-byte Spill
+ sbbq 32(%rdx), %rbp
+ movq %rbp, -32(%rsp) # 8-byte Spill
+ sbbq 40(%rdx), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ sbbq 48(%rdx), %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ sbbq 56(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ sbbq 64(%rdx), %r14
+ movq %r14, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rbp
+ shldq $1, %r14, %rbp
+ movq 24(%r8), %rbx
+ andq %rbp, %rbx
+ movq 8(%r8), %rdi
+ andq %rbp, %rdi
+ andq (%r8), %rbp
+ movq 64(%r8), %r11
+ andq %rdx, %r11
+ rorxq $63, %rdx, %rax
+ andq 56(%r8), %rdx
+ movq 48(%r8), %r9
+ andq %rax, %r9
+ movq 40(%r8), %rsi
+ andq %rax, %rsi
+ movq 32(%r8), %rcx
+ andq %rax, %rcx
+ andq 16(%r8), %rax
+ addq %r13, %rbp
+ adcq %r12, %rdi
+ movq %rbp, (%r10)
+ adcq %r15, %rax
+ movq %rdi, 8(%r10)
+ adcq -40(%rsp), %rbx # 8-byte Folded Reload
+ movq %rax, 16(%r10)
+ movq %rbx, 24(%r10)
+ adcq -32(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 32(%r10)
+ adcq -24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%r10)
+ adcq -16(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%r10)
+ adcq -8(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 56(%r10)
+ adcq %r14, %r11
+ movq %r11, 64(%r10)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end139:
+ .size mcl_fp_subNF9Lbmi2, .Lfunc_end139-mcl_fp_subNF9Lbmi2
+
+ .globl mcl_fpDbl_add9Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add9Lbmi2,@function
+mcl_fpDbl_add9Lbmi2: # @mcl_fpDbl_add9Lbmi2
+# BB#0:
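+# Double-width (18-limb) addition: the low nine limbs of the sum are stored as-is,
+# and only the high nine limbs are conditionally reduced by p before being written out.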
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r15
+ movq 136(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 128(%rdx), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq 120(%rdx), %r10
+ movq 112(%rdx), %r11
+ movq 24(%rsi), %rcx
+ movq 32(%rsi), %r14
+ movq 16(%rdx), %rbp
+ movq (%rdx), %rax
+ movq 8(%rdx), %rbx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %rbp
+ adcq 24(%rdx), %rcx
+ adcq 32(%rdx), %r14
+ movq 104(%rdx), %r9
+ movq 96(%rdx), %r13
+ movq %rax, (%rdi)
+ movq 88(%rdx), %r8
+ movq %rbx, 8(%rdi)
+ movq 80(%rdx), %r12
+ movq %rbp, 16(%rdi)
+ movq 40(%rdx), %rax
+ movq %rcx, 24(%rdi)
+ movq 40(%rsi), %rbp
+ adcq %rax, %rbp
+ movq 48(%rdx), %rcx
+ movq %r14, 32(%rdi)
+ movq 48(%rsi), %rax
+ adcq %rcx, %rax
+ movq 56(%rdx), %r14
+ movq %rbp, 40(%rdi)
+ movq 56(%rsi), %rbp
+ adcq %r14, %rbp
+ movq 72(%rdx), %rcx
+ movq 64(%rdx), %rdx
+ movq %rax, 48(%rdi)
+ movq 64(%rsi), %rax
+ adcq %rdx, %rax
+ movq 136(%rsi), %rbx
+ movq %rbp, 56(%rdi)
+ movq 72(%rsi), %rbp
+ adcq %rcx, %rbp
+ movq 128(%rsi), %rcx
+ movq %rax, 64(%rdi)
+ movq 80(%rsi), %rdx
+ adcq %r12, %rdx
+ movq 88(%rsi), %r12
+ adcq %r8, %r12
+ movq 96(%rsi), %r14
+ adcq %r13, %r14
+ movq %r14, -48(%rsp) # 8-byte Spill
+ movq 104(%rsi), %rax
+ adcq %r9, %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 120(%rsi), %rax
+ movq 112(%rsi), %rsi
+ adcq %r11, %rsi
+ movq %rsi, -24(%rsp) # 8-byte Spill
+ adcq %r10, %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, -8(%rsp) # 8-byte Spill
+ sbbq %r9, %r9
+ andl $1, %r9d
+ movq %rbp, %r10
+ subq (%r15), %r10
+ movq %rdx, %r11
+ sbbq 8(%r15), %r11
+ movq %r12, %rbx
+ sbbq 16(%r15), %rbx
+ sbbq 24(%r15), %r14
+ movq -32(%rsp), %r13 # 8-byte Reload
+ sbbq 32(%r15), %r13
+ movq -24(%rsp), %rsi # 8-byte Reload
+ sbbq 40(%r15), %rsi
+ movq -16(%rsp), %rax # 8-byte Reload
+ sbbq 48(%r15), %rax
+ sbbq 56(%r15), %rcx
+ movq -8(%rsp), %r8 # 8-byte Reload
+ sbbq 64(%r15), %r8
+ sbbq $0, %r9
+ andl $1, %r9d
+ cmovneq %rbp, %r10
+ movq %r10, 72(%rdi)
+ testb %r9b, %r9b
+ cmovneq %rdx, %r11
+ movq %r11, 80(%rdi)
+ cmovneq %r12, %rbx
+ movq %rbx, 88(%rdi)
+ cmovneq -48(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, 96(%rdi)
+ cmovneq -32(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, 104(%rdi)
+ cmovneq -24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 112(%rdi)
+ cmovneq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 120(%rdi)
+ cmovneq -40(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 128(%rdi)
+ cmovneq -8(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, 136(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end140:
+ .size mcl_fpDbl_add9Lbmi2, .Lfunc_end140-mcl_fpDbl_add9Lbmi2
+
+ .globl mcl_fpDbl_sub9Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub9Lbmi2,@function
+mcl_fpDbl_sub9Lbmi2: # @mcl_fpDbl_sub9Lbmi2
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r14
+ movq 136(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 128(%rdx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq 120(%rdx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r11
+ movq (%rsi), %r12
+ movq 8(%rsi), %r13
+ xorl %r9d, %r9d
+ subq (%rdx), %r12
+ sbbq 8(%rdx), %r13
+ sbbq 16(%rdx), %r11
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %rbp
+ sbbq 32(%rdx), %rbp
+ movq 112(%rdx), %r10
+ movq 104(%rdx), %rcx
+ movq %r12, (%rdi)
+ movq 96(%rdx), %rax
+ movq %r13, 8(%rdi)
+ movq 88(%rdx), %r13
+ movq %r11, 16(%rdi)
+ movq 40(%rdx), %r11
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 48(%rdx), %r11
+ movq %rbp, 32(%rdi)
+ movq 48(%rsi), %rbp
+ sbbq %r11, %rbp
+ movq 56(%rdx), %r11
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 64(%rdx), %r11
+ movq %rbp, 48(%rdi)
+ movq 64(%rsi), %rbp
+ sbbq %r11, %rbp
+ movq 80(%rdx), %r8
+ movq 72(%rdx), %r11
+ movq %rbx, 56(%rdi)
+ movq 72(%rsi), %r15
+ sbbq %r11, %r15
+ movq 136(%rsi), %rdx
+ movq %rbp, 64(%rdi)
+ movq 80(%rsi), %rbp
+ sbbq %r8, %rbp
+ movq 88(%rsi), %r12
+ sbbq %r13, %r12
+ movq 96(%rsi), %r13
+ sbbq %rax, %r13
+ movq 104(%rsi), %rax
+ sbbq %rcx, %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq 112(%rsi), %rax
+ sbbq %r10, %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 128(%rsi), %rax
+ movq 120(%rsi), %rcx
+ sbbq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ sbbq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -16(%rsp) # 8-byte Spill
+ sbbq -8(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movl $0, %r8d
+ sbbq $0, %r8
+ andl $1, %r8d
+ movq (%r14), %r10
+ cmoveq %r9, %r10
+ testb %r8b, %r8b
+ movq 16(%r14), %r8
+ cmoveq %r9, %r8
+ movq 8(%r14), %rdx
+ cmoveq %r9, %rdx
+ movq 64(%r14), %rbx
+ cmoveq %r9, %rbx
+ movq 56(%r14), %r11
+ cmoveq %r9, %r11
+ movq 48(%r14), %rsi
+ cmoveq %r9, %rsi
+ movq 40(%r14), %rcx
+ cmoveq %r9, %rcx
+ movq 32(%r14), %rax
+ cmoveq %r9, %rax
+ cmovneq 24(%r14), %r9
+ addq %r15, %r10
+ adcq %rbp, %rdx
+ movq %r10, 72(%rdi)
+ adcq %r12, %r8
+ movq %rdx, 80(%rdi)
+ adcq %r13, %r9
+ movq %r8, 88(%rdi)
+ movq %r9, 96(%rdi)
+ adcq -40(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 104(%rdi)
+ adcq -32(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 112(%rdi)
+ adcq -24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 120(%rdi)
+ adcq -16(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 128(%rdi)
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 136(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end141:
+ .size mcl_fpDbl_sub9Lbmi2, .Lfunc_end141-mcl_fpDbl_sub9Lbmi2
+
+
+ .section ".note.GNU-stack","",@progbits
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/x86-64.s b/vendor/github.com/tangerine-network/mcl/src/asm/x86-64.s
new file mode 100644
index 000000000..aa677d2ea
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/x86-64.s
@@ -0,0 +1,16652 @@
+ .text
+ .file "<stdin>"
+ .globl makeNIST_P192L
+ .align 16, 0x90
+ .type makeNIST_P192L,@function
+makeNIST_P192L: # @makeNIST_P192L
+# BB#0:
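+# Materializes the NIST P-192 prime p = 2^192 - 2^64 - 1 as three 64-bit limbs,
+# least significant first: %rax = 2^64-1, %rdx = 2^64-2, %rcx = 2^64-1.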
+ movq $-1, %rax
+ movq $-2, %rdx
+ movq $-1, %rcx
+ retq
+.Lfunc_end0:
+ .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L
+
+ .globl mcl_fpDbl_mod_NIST_P192L
+ .align 16, 0x90
+ .type mcl_fpDbl_mod_NIST_P192L,@function
+mcl_fpDbl_mod_NIST_P192L: # @mcl_fpDbl_mod_NIST_P192L
+# BB#0:
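+# Reduces a 6-limb (384-bit) input modulo p = 2^192 - 2^64 - 1 by folding the upper
+# three limbs back in via 2^192 ≡ 2^64 + 1 (mod p), then conditionally subtracting p
+# once (implemented as adding 2^64 + 1 and selecting with cmov).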
+ pushq %r14
+ pushq %rbx
+ movq 16(%rsi), %r10
+ movq 24(%rsi), %r8
+ movq 40(%rsi), %r9
+ movq 8(%rsi), %rax
+ addq %r9, %rax
+ adcq $0, %r10
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ movq 32(%rsi), %r11
+ movq (%rsi), %r14
+ addq %r8, %r14
+ adcq %r11, %rax
+ adcq %r9, %r10
+ adcq $0, %rcx
+ addq %r9, %r14
+ adcq %r8, %rax
+ adcq %r11, %r10
+ adcq $0, %rcx
+ addq %rcx, %r14
+ adcq %rax, %rcx
+ adcq $0, %r10
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r14, %rsi
+ addq $1, %rsi
+ movq %rcx, %rdx
+ adcq $1, %rdx
+ movq %r10, %rbx
+ adcq $0, %rbx
+ adcq $-1, %rax
+ andl $1, %eax
+ cmovneq %r14, %rsi
+ movq %rsi, (%rdi)
+ testb %al, %al
+ cmovneq %rcx, %rdx
+ movq %rdx, 8(%rdi)
+ cmovneq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end1:
+ .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L
+
+ .globl mcl_fp_sqr_NIST_P192L
+ .align 16, 0x90
+ .type mcl_fp_sqr_NIST_P192L,@function
+mcl_fp_sqr_NIST_P192L: # @mcl_fp_sqr_NIST_P192L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r11
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rcx
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %rdi
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %r15
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq %rbx
+ movq %rax, %r13
+ movq %rdx, %rcx
+ addq %rcx, %r12
+ adcq %r14, %r15
+ movq %rdi, %r10
+ adcq $0, %r10
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %r9
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rax, %r8
+ movq %rdx, %rsi
+ addq %r13, %rsi
+ adcq %rbp, %rcx
+ movq %r9, %rbx
+ adcq $0, %rbx
+ addq %r13, %rsi
+ adcq %r12, %rcx
+ adcq %r15, %rbx
+ adcq $0, %r10
+ movq %r11, %rax
+ mulq %r11
+ addq %r14, %r9
+ adcq %rdi, %rax
+ adcq $0, %rdx
+ addq %rbp, %rcx
+ adcq %rbx, %r9
+ adcq %r10, %rax
+ adcq $0, %rdx
+ addq %rdx, %rsi
+ adcq $0, %rcx
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq %r9, %r8
+ adcq %rax, %rsi
+ adcq %rdx, %rcx
+ adcq $0, %rbp
+ addq %rdx, %r8
+ adcq %r9, %rsi
+ adcq %rax, %rcx
+ adcq $0, %rbp
+ addq %rbp, %r8
+ adcq %rsi, %rbp
+ adcq $0, %rcx
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r8, %rdx
+ addq $1, %rdx
+ movq %rbp, %rsi
+ adcq $1, %rsi
+ movq %rcx, %rdi
+ adcq $0, %rdi
+ adcq $-1, %rax
+ andl $1, %eax
+ cmovneq %r8, %rdx
+ movq -8(%rsp), %rbx # 8-byte Reload
+ movq %rdx, (%rbx)
+ testb %al, %al
+ cmovneq %rbp, %rsi
+ movq %rsi, 8(%rbx)
+ cmovneq %rcx, %rdi
+ movq %rdi, 16(%rbx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end2:
+ .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L
+
+ .globl mcl_fp_mulNIST_P192L
+ .align 16, 0x90
+ .type mcl_fp_mulNIST_P192L,@function
+mcl_fp_mulNIST_P192L: # @mcl_fp_mulNIST_P192L
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ subq $56, %rsp
+ movq %rdi, %r14
+ leaq 8(%rsp), %rdi
+ callq mcl_fpDbl_mulPre3L@PLT
+ movq 24(%rsp), %r9
+ movq 32(%rsp), %r8
+ movq 48(%rsp), %rdi
+ movq 16(%rsp), %rbx
+ addq %rdi, %rbx
+ adcq $0, %r9
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ movq 40(%rsp), %rsi
+ movq 8(%rsp), %rdx
+ addq %r8, %rdx
+ adcq %rsi, %rbx
+ adcq %rdi, %r9
+ adcq $0, %rcx
+ addq %rdi, %rdx
+ adcq %r8, %rbx
+ adcq %rsi, %r9
+ adcq $0, %rcx
+ addq %rcx, %rdx
+ adcq %rbx, %rcx
+ adcq $0, %r9
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rdx, %rdi
+ addq $1, %rdi
+ movq %rcx, %rbx
+ adcq $1, %rbx
+ movq %r9, %rax
+ adcq $0, %rax
+ adcq $-1, %rsi
+ andl $1, %esi
+ cmovneq %rdx, %rdi
+ movq %rdi, (%r14)
+ testb %sil, %sil
+ cmovneq %rcx, %rbx
+ movq %rbx, 8(%r14)
+ cmovneq %r9, %rax
+ movq %rax, 16(%r14)
+ addq $56, %rsp
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end3:
+ .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L
+
+ .globl mcl_fpDbl_mod_NIST_P521L
+ .align 16, 0x90
+ .type mcl_fpDbl_mod_NIST_P521L,@function
+mcl_fpDbl_mod_NIST_P521L: # @mcl_fpDbl_mod_NIST_P521L
+# BB#0:
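+# Reduces a double-width input modulo p = 2^521 - 1: the bits above position 521 are
+# shifted down and added to the low 521 bits (x ≡ (x >> 521) + (x mod 2^521) (mod p)),
+# and a folded result equal to p itself (all 521 low bits set) is stored as zero.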
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 120(%rsi), %r9
+ movq 128(%rsi), %r14
+ movq %r14, %r8
+ shldq $55, %r9, %r8
+ movq 112(%rsi), %r10
+ shldq $55, %r10, %r9
+ movq 104(%rsi), %r11
+ shldq $55, %r11, %r10
+ movq 96(%rsi), %r15
+ shldq $55, %r15, %r11
+ movq 88(%rsi), %r12
+ shldq $55, %r12, %r15
+ movq 80(%rsi), %rcx
+ shldq $55, %rcx, %r12
+ movq 64(%rsi), %rbx
+ movq 72(%rsi), %rax
+ shldq $55, %rax, %rcx
+ shrq $9, %r14
+ shldq $55, %rbx, %rax
+ andl $511, %ebx # imm = 0x1FF
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r12
+ adcq 24(%rsi), %r15
+ adcq 32(%rsi), %r11
+ adcq 40(%rsi), %r10
+ adcq 48(%rsi), %r9
+ adcq 56(%rsi), %r8
+ adcq %r14, %rbx
+ movq %rbx, %rsi
+ shrq $9, %rsi
+ andl $1, %esi
+ addq %rax, %rsi
+ adcq $0, %rcx
+ adcq $0, %r12
+ adcq $0, %r15
+ adcq $0, %r11
+ adcq $0, %r10
+ adcq $0, %r9
+ adcq $0, %r8
+ adcq $0, %rbx
+ movq %rsi, %rax
+ andq %r12, %rax
+ andq %r15, %rax
+ andq %r11, %rax
+ andq %r10, %rax
+ andq %r9, %rax
+ andq %r8, %rax
+ movq %rbx, %rdx
+ orq $-512, %rdx # imm = 0xFFFFFFFFFFFFFE00
+ andq %rax, %rdx
+ andq %rcx, %rdx
+ cmpq $-1, %rdx
+ je .LBB4_1
+# BB#3: # %nonzero
+ movq %rsi, (%rdi)
+ movq %rcx, 8(%rdi)
+ movq %r12, 16(%rdi)
+ movq %r15, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %r8, 56(%rdi)
+ andl $511, %ebx # imm = 0x1FF
+ movq %rbx, 64(%rdi)
+ jmp .LBB4_2
+.LBB4_1: # %zero
+ movq $0, 64(%rdi)
+ movq $0, 56(%rdi)
+ movq $0, 48(%rdi)
+ movq $0, 40(%rdi)
+ movq $0, 32(%rdi)
+ movq $0, 24(%rdi)
+ movq $0, 16(%rdi)
+ movq $0, 8(%rdi)
+ movq $0, (%rdi)
+.LBB4_2: # %zero
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end4:
+ .size mcl_fpDbl_mod_NIST_P521L, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521L
+
+ .globl mcl_fp_mulUnitPre1L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre1L,@function
+mcl_fp_mulUnitPre1L: # @mcl_fp_mulUnitPre1L
+# BB#0:
+ movq %rdx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ retq
+.Lfunc_end5:
+ .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L
+
+ .globl mcl_fpDbl_mulPre1L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre1L,@function
+mcl_fpDbl_mulPre1L: # @mcl_fpDbl_mulPre1L
+# BB#0:
+ movq (%rdx), %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ retq
+.Lfunc_end6:
+ .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L
+
+ .globl mcl_fpDbl_sqrPre1L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre1L,@function
+mcl_fpDbl_sqrPre1L: # @mcl_fpDbl_sqrPre1L
+# BB#0:
+ movq (%rsi), %rax
+ mulq %rax
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ retq
+.Lfunc_end7:
+ .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L
+
+ .globl mcl_fp_mont1L
+ .align 16, 0x90
+ .type mcl_fp_mont1L,@function
+mcl_fp_mont1L: # @mcl_fp_mont1L
+# BB#0:
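+# One-limb Montgomery multiplication (assuming mcl's argument order z=%rdi, x=%rsi,
+# y=%rdx, p=%rcx, with n' = -p^-1 mod 2^64 stored at p[-1]): t = x*y,
+# m = t*n' mod 2^64, z = (t + m*p) / 2^64, with one conditional subtraction of p.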
+ movq (%rsi), %rax
+ mulq (%rdx)
+ movq %rax, %rsi
+ movq %rdx, %r8
+ movq -8(%rcx), %rax
+ imulq %rsi, %rax
+ movq (%rcx), %rcx
+ mulq %rcx
+ addq %rsi, %rax
+ adcq %r8, %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rsi
+ subq %rcx, %rsi
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rsi
+ movq %rsi, (%rdi)
+ retq
+.Lfunc_end8:
+ .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L
+
+ .globl mcl_fp_montNF1L
+ .align 16, 0x90
+ .type mcl_fp_montNF1L,@function
+mcl_fp_montNF1L: # @mcl_fp_montNF1L
+# BB#0:
+ movq (%rsi), %rax
+ mulq (%rdx)
+ movq %rax, %rsi
+ movq %rdx, %r8
+ movq -8(%rcx), %rax
+ imulq %rsi, %rax
+ movq (%rcx), %rcx
+ mulq %rcx
+ addq %rsi, %rax
+ adcq %r8, %rdx
+ movq %rdx, %rax
+ subq %rcx, %rax
+ cmovsq %rdx, %rax
+ movq %rax, (%rdi)
+ retq
+.Lfunc_end9:
+ .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L
+
+ .globl mcl_fp_montRed1L
+ .align 16, 0x90
+ .type mcl_fp_montRed1L,@function
+mcl_fp_montRed1L: # @mcl_fp_montRed1L
+# BB#0:
+ movq (%rsi), %rcx
+ movq -8(%rdx), %rax
+ imulq %rcx, %rax
+ movq (%rdx), %r8
+ mulq %r8
+ addq %rcx, %rax
+ adcq 8(%rsi), %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rcx
+ subq %r8, %rcx
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rcx
+ movq %rcx, (%rdi)
+ retq
+.Lfunc_end10:
+ .size mcl_fp_montRed1L, .Lfunc_end10-mcl_fp_montRed1L
+
+ .globl mcl_fp_addPre1L
+ .align 16, 0x90
+ .type mcl_fp_addPre1L,@function
+mcl_fp_addPre1L: # @mcl_fp_addPre1L
+# BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, (%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end11:
+ .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L
+
+ .globl mcl_fp_subPre1L
+ .align 16, 0x90
+ .type mcl_fp_subPre1L,@function
+mcl_fp_subPre1L: # @mcl_fp_subPre1L
+# BB#0:
+ movq (%rsi), %rcx
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ movq %rcx, (%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end12:
+ .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L
+
+ .globl mcl_fp_shr1_1L
+ .align 16, 0x90
+ .type mcl_fp_shr1_1L,@function
+mcl_fp_shr1_1L: # @mcl_fp_shr1_1L
+# BB#0:
+ movq (%rsi), %rax
+ shrq %rax
+ movq %rax, (%rdi)
+ retq
+.Lfunc_end13:
+ .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L
+
+ .globl mcl_fp_add1L
+ .align 16, 0x90
+ .type mcl_fp_add1L,@function
+mcl_fp_add1L: # @mcl_fp_add1L
+# BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, (%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %rax
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne .LBB14_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+.LBB14_2: # %carry
+ retq
+.Lfunc_end14:
+ .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L
+
+ .globl mcl_fp_addNF1L
+ .align 16, 0x90
+ .type mcl_fp_addNF1L,@function
+mcl_fp_addNF1L: # @mcl_fp_addNF1L
+# BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, %rdx
+ subq (%rcx), %rdx
+ cmovsq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+.Lfunc_end15:
+ .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L
+
+ .globl mcl_fp_sub1L
+ .align 16, 0x90
+ .type mcl_fp_sub1L,@function
+mcl_fp_sub1L: # @mcl_fp_sub1L
+# BB#0:
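+# One-limb modular subtraction: x - y is stored, and if the subtraction borrowed,
+# p is added back in the .LBB16_2 path.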
+ movq (%rsi), %rax
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ movq %rax, (%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB16_2
+# BB#1: # %nocarry
+ retq
+.LBB16_2: # %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ retq
+.Lfunc_end16:
+ .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L
+
+ .globl mcl_fp_subNF1L
+ .align 16, 0x90
+ .type mcl_fp_subNF1L,@function
+mcl_fp_subNF1L: # @mcl_fp_subNF1L
+# BB#0:
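+# Branch-free one-limb modular subtraction: sarq $63 turns the borrow into an
+# all-ones-or-zero mask, which selects p (or 0) to add back to x - y.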
+ movq (%rsi), %rax
+ subq (%rdx), %rax
+ movq %rax, %rdx
+ sarq $63, %rdx
+ andq (%rcx), %rdx
+ addq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+.Lfunc_end17:
+ .size mcl_fp_subNF1L, .Lfunc_end17-mcl_fp_subNF1L
+
+ .globl mcl_fpDbl_add1L
+ .align 16, 0x90
+ .type mcl_fpDbl_add1L,@function
+mcl_fpDbl_add1L: # @mcl_fpDbl_add1L
+# BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ movq %rax, (%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rsi
+ subq (%rcx), %rsi
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ retq
+.Lfunc_end18:
+ .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L
+
+ .globl mcl_fpDbl_sub1L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub1L,@function
+mcl_fpDbl_sub1L: # @mcl_fpDbl_sub1L
+# BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %r8
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r8
+ movq %rax, (%rdi)
+ movl $0, %eax
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq (%rcx), %rsi
+ addq %r8, %rsi
+ movq %rsi, 8(%rdi)
+ retq
+.Lfunc_end19:
+ .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L
+
+ .globl mcl_fp_mulUnitPre2L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre2L,@function
+mcl_fp_mulUnitPre2L: # @mcl_fp_mulUnitPre2L
+# BB#0:
+ movq %rdx, %r8
+ movq %r8, %rax
+ mulq 8(%rsi)
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %r8, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq $0, %rcx
+ movq %rcx, 16(%rdi)
+ retq
+.Lfunc_end20:
+ .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L
+
+ .globl mcl_fpDbl_mulPre2L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre2L,@function
+mcl_fpDbl_mulPre2L: # @mcl_fpDbl_mulPre2L
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ movq %rdx, %r10
+ movq (%rsi), %r8
+ movq 8(%rsi), %r11
+ movq (%r10), %rcx
+ movq %r8, %rax
+ mulq %rcx
+ movq %rdx, %r9
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %r14
+ movq %rax, %rsi
+ addq %r9, %rsi
+ adcq $0, %r14
+ movq 8(%r10), %rbx
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %r9
+ movq %rax, %rcx
+ movq %r8, %rax
+ mulq %rbx
+ addq %rsi, %rax
+ movq %rax, 8(%rdi)
+ adcq %r14, %rcx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rcx
+ movq %rcx, 16(%rdi)
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end21:
+ .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L
+
+ .globl mcl_fpDbl_sqrPre2L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre2L,@function
+mcl_fpDbl_sqrPre2L: # @mcl_fpDbl_sqrPre2L
+# BB#0:
+ movq (%rsi), %rcx
+ movq 8(%rsi), %r8
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %rsi
+ movq %rax, (%rdi)
+ movq %r8, %rax
+ mulq %rcx
+ movq %rdx, %r9
+ movq %rax, %r10
+ addq %r10, %rsi
+ movq %r9, %rcx
+ adcq $0, %rcx
+ movq %r8, %rax
+ mulq %r8
+ addq %r10, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %rcx, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r9, %rax
+ movq %rax, 16(%rdi)
+ adcq %rdx, %rcx
+ movq %rcx, 24(%rdi)
+ retq
+.Lfunc_end22:
+ .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L
+
+ .globl mcl_fp_mont2L
+ .align 16, 0x90
+ .type mcl_fp_mont2L,@function
+mcl_fp_mont2L: # @mcl_fp_mont2L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq (%rsi), %r8
+ movq 8(%rsi), %r11
+ movq (%rdx), %rsi
+ movq 8(%rdx), %r9
+ movq %r11, %rax
+ mulq %rsi
+ movq %rdx, %r15
+ movq %rax, %r10
+ movq %r8, %rax
+ mulq %rsi
+ movq %rax, %r14
+ movq %rdx, %r13
+ addq %r10, %r13
+ adcq $0, %r15
+ movq -8(%rcx), %r10
+ movq (%rcx), %rbp
+ movq %r14, %rsi
+ imulq %r10, %rsi
+ movq 8(%rcx), %rdi
+ movq %rsi, %rax
+ mulq %rdi
+ movq %rdx, %rcx
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %rbx
+ addq %r12, %rbx
+ adcq $0, %rcx
+ addq %r14, %rax
+ adcq %r13, %rbx
+ adcq %r15, %rcx
+ sbbq %r15, %r15
+ andl $1, %r15d
+ movq %r9, %rax
+ mulq %r11
+ movq %rdx, %r14
+ movq %rax, %r11
+ movq %r9, %rax
+ mulq %r8
+ movq %rax, %r8
+ movq %rdx, %rsi
+ addq %r11, %rsi
+ adcq $0, %r14
+ addq %rbx, %r8
+ adcq %rcx, %rsi
+ adcq %r15, %r14
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ imulq %r8, %r10
+ movq %r10, %rax
+ mulq %rdi
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %r10, %rax
+ mulq %rbp
+ addq %r9, %rdx
+ adcq $0, %rcx
+ addq %r8, %rax
+ adcq %rsi, %rdx
+ adcq %r14, %rcx
+ adcq $0, %rbx
+ movq %rdx, %rax
+ subq %rbp, %rax
+ movq %rcx, %rsi
+ sbbq %rdi, %rsi
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %rcx, %rsi
+ testb %bl, %bl
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rcx # 8-byte Reload
+ movq %rax, (%rcx)
+ movq %rsi, 8(%rcx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end23:
+ .size mcl_fp_mont2L, .Lfunc_end23-mcl_fp_mont2L
+
+ .globl mcl_fp_montNF2L
+ .align 16, 0x90
+ .type mcl_fp_montNF2L,@function
+mcl_fp_montNF2L: # @mcl_fp_montNF2L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq (%rsi), %r8
+ movq 8(%rsi), %r11
+ movq (%rdx), %rbp
+ movq 8(%rdx), %r9
+ movq %r8, %rax
+ mulq %rbp
+ movq %rax, %rsi
+ movq %rdx, %r14
+ movq -8(%rcx), %r10
+ movq (%rcx), %r15
+ movq %rsi, %rbx
+ imulq %r10, %rbx
+ movq 8(%rcx), %rdi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq %r15
+ movq %rdx, %r12
+ movq %rax, %rbx
+ movq %r11, %rax
+ mulq %rbp
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ addq %r14, %rbp
+ adcq $0, %rcx
+ addq %rsi, %rbx
+ adcq %r13, %rbp
+ adcq $0, %rcx
+ addq %r12, %rbp
+ adcq -16(%rsp), %rcx # 8-byte Folded Reload
+ movq %r9, %rax
+ mulq %r11
+ movq %rdx, %rsi
+ movq %rax, %r11
+ movq %r9, %rax
+ mulq %r8
+ movq %rax, %r8
+ movq %rdx, %rbx
+ addq %r11, %rbx
+ adcq $0, %rsi
+ addq %rbp, %r8
+ adcq %rcx, %rbx
+ adcq $0, %rsi
+ imulq %r8, %r10
+ movq %r10, %rax
+ mulq %rdi
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ movq %r10, %rax
+ mulq %r15
+ addq %r8, %rax
+ adcq %rbx, %rbp
+ adcq $0, %rsi
+ addq %rdx, %rbp
+ adcq %rcx, %rsi
+ movq %rbp, %rax
+ subq %r15, %rax
+ movq %rsi, %rcx
+ sbbq %rdi, %rcx
+ cmovsq %rbp, %rax
+ movq -8(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovsq %rsi, %rcx
+ movq %rcx, 8(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end24:
+ .size mcl_fp_montNF2L, .Lfunc_end24-mcl_fp_montNF2L
+
+ .globl mcl_fp_montRed2L
+ .align 16, 0x90
+ .type mcl_fp_montRed2L,@function
+mcl_fp_montRed2L: # @mcl_fp_montRed2L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq -8(%rdx), %r9
+ movq (%rdx), %r11
+ movq (%rsi), %rbx
+ movq %rbx, %rcx
+ imulq %r9, %rcx
+ movq 8(%rdx), %r14
+ movq %rcx, %rax
+ mulq %r14
+ movq %rdx, %r8
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq %r11
+ movq %rdx, %rcx
+ addq %r10, %rcx
+ adcq $0, %r8
+ movq 24(%rsi), %r15
+ addq %rbx, %rax
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r8
+ adcq $0, %r15
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ imulq %rcx, %r9
+ movq %r9, %rax
+ mulq %r14
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %r9, %rax
+ mulq %r11
+ addq %r10, %rdx
+ adcq $0, %rsi
+ addq %rcx, %rax
+ adcq %r8, %rdx
+ adcq %r15, %rsi
+ adcq $0, %rbx
+ movq %rdx, %rax
+ subq %r11, %rax
+ movq %rsi, %rcx
+ sbbq %r14, %rcx
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %rsi, %rcx
+ testb %bl, %bl
+ cmovneq %rdx, %rax
+ movq %rax, (%rdi)
+ movq %rcx, 8(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end25:
+ .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L
+
+ .globl mcl_fp_addPre2L
+ .align 16, 0x90
+ .type mcl_fp_addPre2L,@function
+mcl_fp_addPre2L: # @mcl_fp_addPre2L
+# BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rcx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rcx
+ movq %rax, (%rdi)
+ movq %rcx, 8(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end26:
+ .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L
+
+ .globl mcl_fp_subPre2L
+ .align 16, 0x90
+ .type mcl_fp_subPre2L,@function
+mcl_fp_subPre2L: # @mcl_fp_subPre2L
+# BB#0:
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end27:
+ .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L
+
+ .globl mcl_fp_shr1_2L
+ .align 16, 0x90
+ .type mcl_fp_shr1_2L,@function
+mcl_fp_shr1_2L: # @mcl_fp_shr1_2L
+# BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %rcx
+ shrdq $1, %rcx, %rax
+ movq %rax, (%rdi)
+ shrq %rcx
+ movq %rcx, 8(%rdi)
+ retq
+.Lfunc_end28:
+ .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L
+
+ .globl mcl_fp_add2L
+ .align 16, 0x90
+ .type mcl_fp_add2L,@function
+mcl_fp_add2L: # @mcl_fp_add2L
+# BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB29_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+.LBB29_2: # %carry
+ retq
+.Lfunc_end29:
+ .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L
+
+ .globl mcl_fp_addNF2L
+ .align 16, 0x90
+ .type mcl_fp_addNF2L,@function
+mcl_fp_addNF2L: # @mcl_fp_addNF2L
+# BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %r8
+ addq (%rsi), %rax
+ adcq 8(%rsi), %r8
+ movq %rax, %rsi
+ subq (%rcx), %rsi
+ movq %r8, %rdx
+ sbbq 8(%rcx), %rdx
+ testq %rdx, %rdx
+ cmovsq %rax, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r8, %rdx
+ movq %rdx, 8(%rdi)
+ retq
+.Lfunc_end30:
+ .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L
+
+ .globl mcl_fp_sub2L
+ .align 16, 0x90
+ .type mcl_fp_sub2L,@function
+mcl_fp_sub2L: # @mcl_fp_sub2L
+# BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %r8
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r8
+ movq %rax, (%rdi)
+ movq %r8, 8(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB31_2
+# BB#1: # %nocarry
+ retq
+.LBB31_2: # %carry
+ movq 8(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r8, %rdx
+ movq %rdx, 8(%rdi)
+ retq
+.Lfunc_end31:
+ .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L
+
+ .globl mcl_fp_subNF2L
+ .align 16, 0x90
+ .type mcl_fp_subNF2L,@function
+mcl_fp_subNF2L: # @mcl_fp_subNF2L
+# BB#0:
+ movq (%rsi), %r8
+ movq 8(%rsi), %rsi
+ subq (%rdx), %r8
+ sbbq 8(%rdx), %rsi
+ movq %rsi, %rdx
+ sarq $63, %rdx
+ movq 8(%rcx), %rax
+ andq %rdx, %rax
+ andq (%rcx), %rdx
+ addq %r8, %rdx
+ movq %rdx, (%rdi)
+ adcq %rsi, %rax
+ movq %rax, 8(%rdi)
+ retq
+.Lfunc_end32:
+ .size mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L
+
+ .globl mcl_fpDbl_add2L
+ .align 16, 0x90
+ .type mcl_fpDbl_add2L,@function
+mcl_fpDbl_add2L: # @mcl_fpDbl_add2L
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rdx), %r10
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r10
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ adcq %r8, %r9
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r10, %rdx
+ subq (%rcx), %rdx
+ movq %r9, %rsi
+ sbbq 8(%rcx), %rsi
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %r10, %rdx
+ movq %rdx, 16(%rdi)
+ testb %al, %al
+ cmovneq %r9, %rsi
+ movq %rsi, 24(%rdi)
+ retq
+.Lfunc_end33:
+ .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L
+
+ .globl mcl_fpDbl_sub2L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub2L,@function
+mcl_fpDbl_sub2L: # @mcl_fpDbl_sub2L
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %r11
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %r11
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %r11, (%rdi)
+ movq %rsi, 8(%rdi)
+ sbbq %r8, %r9
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ cmovneq 8(%rcx), %rax
+ addq %r10, %rsi
+ movq %rsi, 16(%rdi)
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ retq
+.Lfunc_end34:
+ .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L
+
+ .globl mcl_fp_mulUnitPre3L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre3L,@function
+mcl_fp_mulUnitPre3L: # @mcl_fp_mulUnitPre3L
+# BB#0:
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r9, %r10
+ movq %r10, 16(%rdi)
+ adcq $0, %r8
+ movq %r8, 24(%rdi)
+ retq
+.Lfunc_end35:
+ .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L
+
+ .globl mcl_fpDbl_mulPre3L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre3L,@function
+mcl_fpDbl_mulPre3L: # @mcl_fpDbl_mulPre3L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r10
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ movq (%r10), %rbx
+ movq %r8, %rax
+ mulq %rbx
+ movq %rdx, %rcx
+ movq 16(%rsi), %r11
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %r14
+ movq %rax, %rsi
+ movq %r9, %rax
+ mulq %rbx
+ movq %rdx, %r15
+ movq %rax, %rbx
+ addq %rcx, %rbx
+ adcq %rsi, %r15
+ adcq $0, %r14
+ movq 8(%r10), %rcx
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %r9, %rax
+ mulq %rcx
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %r8, %rax
+ mulq %rcx
+ addq %rbx, %rax
+ movq %rax, 8(%rdi)
+ adcq %r15, %rsi
+ adcq %r14, %rbp
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq %rdx, %rsi
+ adcq %r13, %rbp
+ adcq %r12, %r14
+ movq 16(%r10), %r15
+ movq %r11, %rax
+ mulq %r15
+ movq %rdx, %r10
+ movq %rax, %rbx
+ movq %r9, %rax
+ mulq %r15
+ movq %rdx, %r9
+ movq %rax, %rcx
+ movq %r8, %rax
+ mulq %r15
+ addq %rsi, %rax
+ movq %rax, 16(%rdi)
+ adcq %rbp, %rcx
+ adcq %r14, %rbx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rcx
+ movq %rcx, 24(%rdi)
+ adcq %r9, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r10, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end36:
+ .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L
+
+ .globl mcl_fpDbl_sqrPre3L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre3L,@function
+mcl_fpDbl_sqrPre3L: # @mcl_fpDbl_sqrPre3L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 16(%rsi), %r10
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %rbx
+ movq %rax, (%rdi)
+ movq %r10, %rax
+ mulq %rcx
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq %rcx
+ movq %rdx, %r14
+ movq %rax, %r12
+ addq %r12, %rbx
+ movq %r14, %r13
+ adcq %r11, %r13
+ movq %r8, %rcx
+ adcq $0, %rcx
+ movq %r10, %rax
+ mulq %rsi
+ movq %rdx, %r9
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq %rsi
+ movq %rax, %rsi
+ addq %r12, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r13, %rsi
+ adcq %r15, %rcx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq %r14, %rsi
+ adcq %rdx, %rcx
+ adcq %r9, %rbx
+ movq %r10, %rax
+ mulq %r10
+ addq %r11, %rsi
+ movq %rsi, 16(%rdi)
+ adcq %r15, %rcx
+ adcq %rbx, %rax
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq %r8, %rcx
+ movq %rcx, 24(%rdi)
+ adcq %r9, %rax
+ movq %rax, 32(%rdi)
+ adcq %rdx, %rsi
+ movq %rsi, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end37:
+ .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L
+
+ .globl mcl_fp_mont3L
+ .align 16, 0x90
+ .type mcl_fp_mont3L,@function
+mcl_fp_mont3L: # @mcl_fp_mont3L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r10
+ movq %r10, -56(%rsp) # 8-byte Spill
+ movq %rdi, -48(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq (%r10), %rdi
+ mulq %rdi
+ movq %rax, %rbp
+ movq %rdx, %r8
+ movq (%rsi), %rbx
+ movq %rbx, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rdx, %r15
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rax, %r12
+ movq %rdx, %r11
+ addq %rsi, %r11
+ adcq %rbp, %r15
+ adcq $0, %r8
+ movq -8(%rcx), %r14
+ movq (%rcx), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ movq %r12, %rbp
+ imulq %r14, %rbp
+ movq 16(%rcx), %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rbx
+ movq %rbx, -8(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rdx, %rcx
+ movq %rax, %r13
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %rsi
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rdx, %rbp
+ addq %r9, %rbp
+ adcq %r13, %rsi
+ adcq $0, %rcx
+ addq %r12, %rax
+ adcq %r11, %rbp
+ movq 8(%r10), %rbx
+ adcq %r15, %rsi
+ adcq %r8, %rcx
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ movq %rbx, %rax
+ movq -64(%rsp), %r10 # 8-byte Reload
+ mulq %r10
+ movq %rdx, %r15
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %rbx
+ addq %r11, %rbx
+ adcq %r9, %r12
+ adcq $0, %r15
+ addq %rbp, %r8
+ adcq %rsi, %rbx
+ adcq %rcx, %r12
+ adcq %rdi, %r15
+ sbbq %r11, %r11
+ andl $1, %r11d
+ movq %r8, %rcx
+ imulq %r14, %rcx
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ addq %rdi, %rbp
+ adcq %r9, %rsi
+ adcq $0, %r13
+ addq %r8, %rax
+ adcq %rbx, %rbp
+ adcq %r12, %rsi
+ adcq %r15, %r13
+ adcq $0, %r11
+ movq -56(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rcx
+ movq %rcx, %rax
+ mulq %r10
+ movq %rdx, %r8
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rcx
+ addq %rdi, %rcx
+ adcq %r10, %r15
+ adcq $0, %r8
+ addq %rbp, %r9
+ adcq %rsi, %rcx
+ adcq %r13, %r15
+ adcq %r11, %r8
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ imulq %r9, %r14
+ movq %r14, %rax
+ movq -16(%rsp), %r12 # 8-byte Reload
+ mulq %r12
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %r14, %rax
+ movq -8(%rsp), %r13 # 8-byte Reload
+ mulq %r13
+ movq %rdx, %rsi
+ movq %rax, %r11
+ movq %r14, %rax
+ movq -24(%rsp), %rbp # 8-byte Reload
+ mulq %rbp
+ addq %r11, %rdx
+ adcq %r10, %rsi
+ adcq $0, %rbx
+ addq %r9, %rax
+ adcq %rcx, %rdx
+ adcq %r15, %rsi
+ adcq %r8, %rbx
+ adcq $0, %rdi
+ movq %rdx, %rax
+ subq %rbp, %rax
+ movq %rsi, %rcx
+ sbbq %r13, %rcx
+ movq %rbx, %rbp
+ sbbq %r12, %rbp
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %rbx, %rbp
+ testb %dil, %dil
+ cmovneq %rdx, %rax
+ movq -48(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rsi, %rcx
+ movq %rcx, 8(%rdx)
+ movq %rbp, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end38:
+ .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L
+
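+# "NF" variant of the 3-limb Montgomery multiplication: same interleaved
+# multiply/reduce structure, but the final correction picks z or z-p by the
+# sign of the trial subtraction (cmovs) rather than an explicit carry mask;
+# presumably valid because the modulus leaves headroom in its top limb.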
+ .globl mcl_fp_montNF3L
+ .align 16, 0x90
+ .type mcl_fp_montNF3L,@function
+mcl_fp_montNF3L: # @mcl_fp_montNF3L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rdi, -32(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r10
+ movq %r10, -40(%rsp) # 8-byte Spill
+ movq (%rdx), %rbp
+ movq %r10, %rax
+ mulq %rbp
+ movq %rax, %r14
+ movq %rdx, %r15
+ movq (%rsi), %rbx
+ movq %rbx, -64(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulq %rbp
+ movq %rdx, %rdi
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rax, %r13
+ movq %rdx, %rbp
+ addq %rsi, %rbp
+ adcq %r14, %rdi
+ adcq $0, %r15
+ movq -8(%rcx), %r14
+ movq (%rcx), %r11
+ movq %r11, -48(%rsp) # 8-byte Spill
+ movq %r13, %rbx
+ imulq %r14, %rbx
+ movq 16(%rcx), %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rdx, %r8
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rdx, %r9
+ movq %rax, %rcx
+ movq %rbx, %rax
+ mulq %r11
+ addq %r13, %rax
+ adcq %rbp, %rcx
+ adcq %rdi, %r12
+ adcq $0, %r15
+ addq %rdx, %rcx
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rbp
+ adcq %r9, %r12
+ adcq %r8, %r15
+ movq %rbp, %rax
+ mulq %r10
+ movq %rdx, %rsi
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r9
+ movq %rbp, %rax
+ movq -64(%rsp), %r10 # 8-byte Reload
+ mulq %r10
+ movq %rax, %r13
+ movq %rdx, %rbp
+ addq %r9, %rbp
+ adcq %r8, %rbx
+ adcq $0, %rsi
+ addq %rcx, %r13
+ adcq %r12, %rbp
+ adcq %r15, %rbx
+ adcq $0, %rsi
+ movq %r13, %rcx
+ imulq %r14, %rcx
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r15
+ movq %rcx, %rax
+ movq -56(%rsp), %rdi # 8-byte Reload
+ mulq %rdi
+ movq %rdx, %r9
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq %r11
+ addq %r13, %rax
+ adcq %rbp, %r12
+ adcq %rbx, %r15
+ adcq $0, %rsi
+ addq %rdx, %r12
+ adcq %r9, %r15
+ adcq %r8, %rsi
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rbx
+ movq %rbx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq %r10
+ movq %rax, %r10
+ movq %rdx, %rbx
+ addq %r9, %rbx
+ adcq %r8, %rcx
+ adcq $0, %rbp
+ addq %r12, %r10
+ adcq %r15, %rbx
+ adcq %rsi, %rcx
+ adcq $0, %rbp
+ imulq %r10, %r14
+ movq %r14, %rax
+ movq -16(%rsp), %r15 # 8-byte Reload
+ mulq %r15
+ movq %rdx, %r8
+ movq %rax, %rsi
+ movq %r14, %rax
+ movq %rdi, %r11
+ mulq %r11
+ movq %rdx, %r9
+ movq %rax, %rdi
+ movq %r14, %rax
+ movq -48(%rsp), %r14 # 8-byte Reload
+ mulq %r14
+ addq %r10, %rax
+ adcq %rbx, %rdi
+ adcq %rcx, %rsi
+ adcq $0, %rbp
+ addq %rdx, %rdi
+ adcq %r9, %rsi
+ adcq %r8, %rbp
+ movq %rdi, %rax
+ subq %r14, %rax
+ movq %rsi, %rcx
+ sbbq %r11, %rcx
+ movq %rbp, %rbx
+ sbbq %r15, %rbx
+ movq %rbx, %rdx
+ sarq $63, %rdx
+ cmovsq %rdi, %rax
+ movq -32(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovsq %rsi, %rcx
+ movq %rcx, 8(%rdx)
+ cmovsq %rbp, %rbx
+ movq %rbx, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end39:
+ .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L
+
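+# Montgomery reduction: folds the 6-limb value at (%rsi) down to 3 limbs,
+# presumably computing t*R^-1 mod p. %rdx points at the modulus block (same
+# layout as above, with the reduction constant at offset -8), and the result
+# gets a final conditional subtraction of p.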
+ .globl mcl_fp_montRed3L
+ .align 16, 0x90
+ .type mcl_fp_montRed3L,@function
+mcl_fp_montRed3L: # @mcl_fp_montRed3L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq -8(%rcx), %r9
+ movq (%rcx), %rdi
+ movq %rdi, -16(%rsp) # 8-byte Spill
+ movq (%rsi), %r15
+ movq %r15, %rbx
+ imulq %r9, %rbx
+ movq 16(%rcx), %rbp
+ movq %rbp, -24(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rax, %r11
+ movq %rdx, %r8
+ movq 8(%rcx), %rcx
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rcx, %r12
+ movq %rdx, %r10
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdi, %rbx
+ movq %rdx, %rcx
+ addq %r14, %rcx
+ adcq %r11, %r10
+ adcq $0, %r8
+ movq 40(%rsi), %rdi
+ movq 32(%rsi), %r13
+ addq %r15, %rax
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r10
+ adcq 24(%rsi), %r8
+ adcq $0, %r13
+ adcq $0, %rdi
+ sbbq %r15, %r15
+ andl $1, %r15d
+ movq %rcx, %rsi
+ imulq %r9, %rsi
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %r11
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq %r12
+ movq %rdx, %r14
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, %rbx
+ addq %r12, %rbx
+ adcq %rbp, %r14
+ adcq $0, %r11
+ addq %rcx, %rax
+ adcq %r10, %rbx
+ adcq %r8, %r14
+ adcq %r13, %r11
+ adcq $0, %rdi
+ adcq $0, %r15
+ imulq %rbx, %r9
+ movq %r9, %rax
+ movq -24(%rsp), %r12 # 8-byte Reload
+ mulq %r12
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %r9, %rax
+ movq -32(%rsp), %r13 # 8-byte Reload
+ mulq %r13
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %r9, %rax
+ movq -16(%rsp), %rcx # 8-byte Reload
+ mulq %rcx
+ addq %r10, %rdx
+ adcq %r8, %rsi
+ adcq $0, %rbp
+ addq %rbx, %rax
+ adcq %r14, %rdx
+ adcq %r11, %rsi
+ adcq %rdi, %rbp
+ adcq $0, %r15
+ movq %rdx, %rax
+ subq %rcx, %rax
+ movq %rsi, %rdi
+ sbbq %r13, %rdi
+ movq %rbp, %rcx
+ sbbq %r12, %rcx
+ sbbq $0, %r15
+ andl $1, %r15d
+ cmovneq %rbp, %rcx
+ testb %r15b, %r15b
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rsi, %rdi
+ movq %rdi, 8(%rdx)
+ movq %rcx, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end40:
+ .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L
+
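+# 3-limb addition without reduction: (%rdi) = (%rdx) + (%rsi); the carry out is
+# returned in %rax.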
+ .globl mcl_fp_addPre3L
+ .align 16, 0x90
+ .type mcl_fp_addPre3L,@function
+mcl_fp_addPre3L: # @mcl_fp_addPre3L
+# BB#0:
+ movq 16(%rdx), %rax
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rax
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end41:
+ .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L
+
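+# 3-limb subtraction without reduction: (%rdi) = (%rsi) - (%rdx); the borrow is
+# returned in %rax.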
+ .globl mcl_fp_subPre3L
+ .align 16, 0x90
+ .type mcl_fp_subPre3L,@function
+mcl_fp_subPre3L: # @mcl_fp_subPre3L
+# BB#0:
+ movq 16(%rsi), %r8
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r8
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end42:
+ .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L
+
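+# Right shift of a 3-limb value by one bit: (%rdi) = (%rsi) >> 1, using shrdq
+# to carry bits across limb boundaries.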
+ .globl mcl_fp_shr1_3L
+ .align 16, 0x90
+ .type mcl_fp_shr1_3L,@function
+mcl_fp_shr1_3L: # @mcl_fp_shr1_3L
+# BB#0:
+ movq 16(%rsi), %rax
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rdx
+ shrdq $1, %rdx, %rcx
+ movq %rcx, (%rdi)
+ shrdq $1, %rax, %rdx
+ movq %rdx, 8(%rdi)
+ shrq %rax
+ movq %rax, 16(%rdi)
+ retq
+.Lfunc_end43:
+ .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L
+
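+# Modular addition: computes (%rsi) + (%rdx), stores the raw sum, then tries
+# subtracting the modulus at (%rcx); if that subtraction does not borrow, the
+# reduced sum overwrites the stored result.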
+ .globl mcl_fp_add3L
+ .align 16, 0x90
+ .type mcl_fp_add3L,@function
+mcl_fp_add3L: # @mcl_fp_add3L
+# BB#0:
+ movq 16(%rdx), %r8
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r8
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB44_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r8, 16(%rdi)
+.LBB44_2: # %carry
+ retq
+.Lfunc_end44:
+ .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L
+
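+# Branch-free modular addition ("NF" variant): computes x + y and x + y - p,
+# then selects between them with cmovs on the sign of the trial subtraction.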
+ .globl mcl_fp_addNF3L
+ .align 16, 0x90
+ .type mcl_fp_addNF3L,@function
+mcl_fp_addNF3L: # @mcl_fp_addNF3L
+# BB#0:
+ movq 16(%rdx), %r8
+ movq (%rdx), %r10
+ movq 8(%rdx), %r9
+ addq (%rsi), %r10
+ adcq 8(%rsi), %r9
+ adcq 16(%rsi), %r8
+ movq %r10, %rsi
+ subq (%rcx), %rsi
+ movq %r9, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r8, %rax
+ sbbq 16(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r10, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 16(%rdi)
+ retq
+.Lfunc_end45:
+ .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L
+
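+# Modular subtraction: computes (%rsi) - (%rdx) and stores it; if the
+# subtraction borrowed, the modulus at (%rcx) is added back in.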
+ .globl mcl_fp_sub3L
+ .align 16, 0x90
+ .type mcl_fp_sub3L,@function
+mcl_fp_sub3L: # @mcl_fp_sub3L
+# BB#0:
+ movq 16(%rsi), %r8
+ movq (%rsi), %rax
+ movq 8(%rsi), %r9
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r9
+ sbbq 16(%rdx), %r8
+ movq %rax, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB46_2
+# BB#1: # %nocarry
+ retq
+.LBB46_2: # %carry
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rsi
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r8, %rsi
+ movq %rsi, 16(%rdi)
+ retq
+.Lfunc_end46:
+ .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L
+
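+# Branch-free modular subtraction: forms an all-ones/all-zeros mask from the
+# sign of the raw difference and adds the masked modulus, so p is added back
+# exactly when the difference went negative.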
+ .globl mcl_fp_subNF3L
+ .align 16, 0x90
+ .type mcl_fp_subNF3L,@function
+mcl_fp_subNF3L: # @mcl_fp_subNF3L
+# BB#0:
+ movq 16(%rsi), %r10
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ subq (%rdx), %r8
+ sbbq 8(%rdx), %r9
+ sbbq 16(%rdx), %r10
+ movq %r10, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rsi
+ shldq $1, %r10, %rsi
+ andq (%rcx), %rsi
+ movq 16(%rcx), %rax
+ andq %rdx, %rax
+ andq 8(%rcx), %rdx
+ addq %r8, %rsi
+ movq %rsi, (%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r10, %rax
+ movq %rax, 16(%rdi)
+ retq
+.Lfunc_end47:
+ .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L
+
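+# Double-width addition: adds two 6-limb values, stores the low 3 limbs as-is,
+# and reduces the high 3 limbs by a conditional subtraction of the modulus at
+# (%rcx).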
+ .globl mcl_fpDbl_add3L
+ .align 16, 0x90
+ .type mcl_fpDbl_add3L,@function
+mcl_fpDbl_add3L: # @mcl_fpDbl_add3L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r10
+ movq 40(%rsi), %r8
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r9
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r14, %r15
+ adcq %r11, %r9
+ adcq %r10, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r15, %rdx
+ subq (%rcx), %rdx
+ movq %r9, %rsi
+ sbbq 8(%rcx), %rsi
+ movq %r8, %rbx
+ sbbq 16(%rcx), %rbx
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %r15, %rdx
+ movq %rdx, 24(%rdi)
+ testb %al, %al
+ cmovneq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ cmovneq %r8, %rbx
+ movq %rbx, 40(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end48:
+ .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L
+
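+# Double-width subtraction: subtracts two 6-limb values, stores the low 3 limbs
+# as-is, and conditionally adds the modulus at (%rcx) back into the high 3
+# limbs when the subtraction borrowed.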
+ .globl mcl_fpDbl_sub3L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub3L,@function
+mcl_fpDbl_sub3L: # @mcl_fpDbl_sub3L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r10
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rax
+ xorl %esi, %esi
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rax
+ movq 24(%rdx), %r15
+ movq 32(%rdx), %r12
+ sbbq 16(%rdx), %r14
+ movq %rbx, (%rdi)
+ movq %rax, 8(%rdi)
+ movq %r14, 16(%rdi)
+ sbbq %r15, %r11
+ sbbq %r12, %r9
+ sbbq %r10, %r8
+ movl $0, %eax
+ sbbq $0, %rax
+ andl $1, %eax
+ movq (%rcx), %rdx
+ cmoveq %rsi, %rdx
+ testb %al, %al
+ movq 16(%rcx), %rax
+ cmoveq %rsi, %rax
+ cmovneq 8(%rcx), %rsi
+ addq %r11, %rdx
+ movq %rdx, 24(%rdi)
+ adcq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r8, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end49:
+ .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L
+
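+# The *4L and *5L symbols that follow appear to repeat the same pre-multiply,
+# squaring, Montgomery, add/sub and shift routines for 4-limb (256-bit) and
+# 5-limb (320-bit) operands; only the limb count and register scheduling differ.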
+ .globl mcl_fp_mulUnitPre4L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre4L,@function
+mcl_fp_mulUnitPre4L: # @mcl_fp_mulUnitPre4L
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r14, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r11, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r9, %r10
+ movq %r10, 24(%rdi)
+ adcq $0, %r8
+ movq %r8, 32(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end50:
+ .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L
+
+ .globl mcl_fpDbl_mulPre4L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre4L,@function
+mcl_fpDbl_mulPre4L: # @mcl_fpDbl_mulPre4L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq (%rsi), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 8(%rsi), %r8
+ movq %r8, -64(%rsp) # 8-byte Spill
+ movq (%rdx), %rbx
+ movq %rdx, %rbp
+ mulq %rbx
+ movq %rdx, %r15
+ movq 16(%rsi), %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ movq 24(%rsi), %r11
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %r12
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq %rbx
+ movq %rdx, %r10
+ movq %rax, %r9
+ movq %r8, %rax
+ mulq %rbx
+ movq %rdx, %r13
+ movq %rax, %r8
+ addq %r15, %r8
+ adcq %r9, %r13
+ adcq %r14, %r10
+ adcq $0, %r12
+ movq %rbp, %r9
+ movq 8(%r9), %rbp
+ movq %r11, %rax
+ mulq %rbp
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq %rbp
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %rcx
+ movq -64(%rsp), %r14 # 8-byte Reload
+ movq %r14, %rax
+ mulq %rbp
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %rbx
+ movq -8(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ addq %r8, %rax
+ movq %rax, 8(%rdi)
+ adcq %r13, %rbx
+ adcq %r10, %rcx
+ adcq %r12, %r15
+ sbbq %r13, %r13
+ andl $1, %r13d
+ movq 16(%r9), %rbp
+ movq %r14, %rax
+ mulq %rbp
+ movq %rax, %r12
+ movq %rdx, %r8
+ addq -56(%rsp), %rbx # 8-byte Folded Reload
+ adcq -48(%rsp), %rcx # 8-byte Folded Reload
+ adcq -40(%rsp), %r15 # 8-byte Folded Reload
+ adcq -32(%rsp), %r13 # 8-byte Folded Reload
+ movq %r11, %rax
+ mulq %rbp
+ movq %rdx, %r9
+ movq %rax, %r11
+ movq -24(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, %r14
+ movq %rax, %r10
+ movq -8(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ addq %rbx, %rax
+ movq %rax, 16(%rdi)
+ adcq %r12, %rcx
+ adcq %r15, %r10
+ adcq %r13, %r11
+ sbbq %r13, %r13
+ andl $1, %r13d
+ addq %rdx, %rcx
+ adcq %r8, %r10
+ adcq %r14, %r11
+ adcq %r9, %r13
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rbx
+ movq %rbx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r9
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r15
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq (%rsi)
+ addq %rcx, %rax
+ movq %rax, 24(%rdi)
+ adcq %r10, %rbp
+ adcq %r11, %r12
+ adcq %r13, %r14
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rbp
+ movq %rbp, 32(%rdi)
+ adcq %r15, %r12
+ movq %r12, 40(%rdi)
+ adcq %r9, %r14
+ movq %r14, 48(%rdi)
+ adcq %r8, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end51:
+ .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L
+
+ .globl mcl_fpDbl_sqrPre4L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre4L,@function
+mcl_fpDbl_sqrPre4L: # @mcl_fpDbl_sqrPre4L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rsi, %r10
+ movq 16(%r10), %r9
+ movq 24(%r10), %r11
+ movq (%r10), %r15
+ movq 8(%r10), %r8
+ movq %r15, %rax
+ mulq %r15
+ movq %rdx, %rbp
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %r8
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq %r8
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %r11, %rax
+ mulq %r15
+ movq %rdx, %rbx
+ movq %rax, %rcx
+ movq %r9, %rax
+ mulq %r15
+ movq %rdx, %rsi
+ movq %rsi, -16(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %r8, %rax
+ mulq %r8
+ movq %rdx, %r13
+ movq %rax, %r14
+ movq %r8, %rax
+ mulq %r15
+ addq %rax, %rbp
+ movq %rdx, %r8
+ adcq %r12, %r8
+ adcq %rsi, %rcx
+ adcq $0, %rbx
+ addq %rax, %rbp
+ movq %rbp, 8(%rdi)
+ adcq %r14, %r8
+ movq -40(%rsp), %rsi # 8-byte Reload
+ adcq %rsi, %rcx
+ adcq -32(%rsp), %rbx # 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq %rdx, %r8
+ adcq %r13, %rcx
+ movq -24(%rsp), %r15 # 8-byte Reload
+ adcq %r15, %rbx
+ adcq -8(%rsp), %rbp # 8-byte Folded Reload
+ movq %r11, %rax
+ mulq %r9
+ movq %rdx, %r14
+ movq %rax, %r11
+ movq %r9, %rax
+ mulq %r9
+ movq %rax, %r9
+ addq %r12, %r8
+ movq %r8, 16(%rdi)
+ adcq %rsi, %rcx
+ adcq %rbx, %r9
+ adcq %rbp, %r11
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -16(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r15, %r9
+ adcq %rdx, %r11
+ adcq %r14, %r12
+ movq 24(%r10), %rbp
+ movq %rbp, %rax
+ mulq 16(%r10)
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq 8(%r10)
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq (%r10)
+ movq %rdx, %r15
+ movq %rax, %rsi
+ movq %rbp, %rax
+ mulq %rbp
+ addq %rcx, %rsi
+ movq %rsi, 24(%rdi)
+ adcq %r9, %rbx
+ adcq %r11, %r14
+ adcq %r12, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r15, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r13, %r14
+ movq %r14, 40(%rdi)
+ adcq %r8, %rax
+ movq %rax, 48(%rdi)
+ adcq %rdx, %rcx
+ movq %rcx, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end52:
+ .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L
+
+ .globl mcl_fp_mont4L
+ .align 16, 0x90
+ .type mcl_fp_mont4L,@function
+mcl_fp_mont4L: # @mcl_fp_mont4L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq (%rdx), %rdi
+ mulq %rdi
+ movq %rax, %r9
+ movq %rdx, %rbp
+ movq 16(%rsi), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, %r10
+ movq (%rsi), %rbx
+ movq %rbx, -72(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rdx, %r14
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rax, %r11
+ movq %rdx, %r13
+ addq %rsi, %r13
+ adcq %r8, %r14
+ adcq %r9, %r10
+ adcq $0, %rbp
+ movq %rbp, -96(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq (%rcx), %r8
+ movq %r8, -32(%rsp) # 8-byte Spill
+ movq %r11, %rdi
+ imulq %rax, %rdi
+ movq 24(%rcx), %rdx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rsi
+ movq %rsi, -16(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rdx, %r9
+ movq %rax, %r12
+ movq %rdi, %rax
+ mulq %rsi
+ movq %rdx, %rbp
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rdx, %rsi
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq %r8
+ movq %rdx, %rcx
+ addq %r15, %rcx
+ adcq %rbx, %rsi
+ adcq %r12, %rbp
+ adcq $0, %r9
+ addq %r11, %rax
+ adcq %r13, %rcx
+ adcq %r14, %rsi
+ adcq %r10, %rbp
+ adcq -96(%rsp), %r9 # 8-byte Folded Reload
+ sbbq %r13, %r13
+ andl $1, %r13d
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdi
+ movq %rdi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %rdi
+ addq %r15, %rdi
+ adcq %r14, %rbx
+ adcq %r11, %r10
+ adcq $0, %r12
+ addq %rcx, %r8
+ adcq %rsi, %rdi
+ adcq %rbp, %rbx
+ adcq %r9, %r10
+ adcq %r13, %r12
+ sbbq %r15, %r15
+ andl $1, %r15d
+ movq %r8, %rsi
+ imulq -24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rsi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ addq %rbp, %r11
+ adcq %r14, %r9
+ adcq -96(%rsp), %rcx # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %r8, %rax
+ adcq %rdi, %r11
+ adcq %rbx, %r9
+ adcq %r10, %rcx
+ adcq %r12, %r13
+ adcq $0, %r15
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rsi
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %rbp
+ addq %rdi, %rbp
+ adcq %rbx, %r14
+ adcq -96(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r12
+ addq %r11, %r8
+ adcq %r9, %rbp
+ adcq %rcx, %r14
+ adcq %r13, %r10
+ adcq %r15, %r12
+ sbbq %r13, %r13
+ movq %r8, %rsi
+ imulq -24(%rsp), %rsi # 8-byte Folded Reload
+ andl $1, %r13d
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ addq %r15, %rsi
+ adcq %r11, %rbx
+ adcq %r9, %rcx
+ adcq $0, %rdi
+ addq %r8, %rax
+ adcq %rbp, %rsi
+ adcq %r14, %rbx
+ adcq %r10, %rcx
+ adcq %r12, %rdi
+ adcq $0, %r13
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rbp
+ movq %rbp, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r15
+ movq %rbp, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rbp
+ addq %r12, %rbp
+ adcq %r15, %r11
+ adcq %r14, %r10
+ adcq $0, %r8
+ addq %rsi, %r9
+ adcq %rbx, %rbp
+ adcq %rcx, %r11
+ adcq %rdi, %r10
+ adcq %r13, %r8
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq -24(%rsp), %rcx # 8-byte Reload
+ imulq %r9, %rcx
+ movq %rcx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r15
+ movq %rcx, %rax
+ movq -40(%rsp), %r14 # 8-byte Reload
+ mulq %r14
+ movq %rdx, %rdi
+ movq %rax, %r12
+ movq %rcx, %rax
+ movq -32(%rsp), %rcx # 8-byte Reload
+ mulq %rcx
+ addq %r12, %rdx
+ adcq %r15, %rdi
+ adcq -24(%rsp), %r13 # 8-byte Folded Reload
+ adcq $0, %rbx
+ addq %r9, %rax
+ adcq %rbp, %rdx
+ adcq %r11, %rdi
+ adcq %r10, %r13
+ adcq %r8, %rbx
+ adcq $0, %rsi
+ movq %rdx, %rax
+ subq %rcx, %rax
+ movq %rdi, %rcx
+ sbbq %r14, %rcx
+ movq %r13, %r8
+ sbbq -16(%rsp), %r8 # 8-byte Folded Reload
+ movq %rbx, %rbp
+ sbbq -8(%rsp), %rbp # 8-byte Folded Reload
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rbx, %rbp
+ testb %sil, %sil
+ cmovneq %rdx, %rax
+ movq -88(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rdi, %rcx
+ movq %rcx, 8(%rdx)
+ cmovneq %r13, %r8
+ movq %r8, 16(%rdx)
+ movq %rbp, 24(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end53:
+ .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L
+
+ .globl mcl_fp_montNF4L
+ .align 16, 0x90
+ .type mcl_fp_montNF4L,@function
+mcl_fp_montNF4L: # @mcl_fp_montNF4L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq (%rdx), %rdi
+ mulq %rdi
+ movq %rax, %r15
+ movq %rdx, %r12
+ movq 16(%rsi), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, %r9
+ movq (%rsi), %rbp
+ movq %rbp, -40(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rdx, %rbx
+ movq %rax, %rsi
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, %r11
+ movq %rdx, %rdi
+ addq %rsi, %rdi
+ adcq %r8, %rbx
+ adcq %r15, %r9
+ adcq $0, %r12
+ movq -8(%rcx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq (%rcx), %r8
+ movq %r8, -64(%rsp) # 8-byte Spill
+ movq %r11, %rsi
+ imulq %rax, %rsi
+ movq 24(%rcx), %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rbp
+ movq %rbp, -72(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rdx, %r15
+ movq %rax, %r13
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %r10
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq %rcx
+ movq %rdx, %r14
+ movq %rax, %rcx
+ movq %rsi, %rax
+ mulq %r8
+ addq %r11, %rax
+ adcq %rdi, %rcx
+ adcq %rbx, %rbp
+ adcq %r9, %r13
+ adcq $0, %r12
+ addq %rdx, %rcx
+ adcq %r14, %rbp
+ adcq %r10, %r13
+ adcq %r15, %r12
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdi
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rax, %rdi
+ movq %rdx, %r9
+ addq %r14, %r9
+ adcq %r11, %r8
+ adcq %r10, %rsi
+ adcq $0, %rbx
+ addq %rcx, %rdi
+ adcq %rbp, %r9
+ adcq %r13, %r8
+ adcq %r12, %rsi
+ adcq $0, %rbx
+ movq %rdi, %rcx
+ imulq -8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r13
+ movq %rcx, %rax
+ movq -80(%rsp), %r15 # 8-byte Reload
+ mulq %r15
+ movq %rdx, %r14
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ addq %rdi, %rax
+ adcq %r9, %rbp
+ adcq %r8, %r13
+ adcq %rsi, %r12
+ adcq $0, %rbx
+ addq %rdx, %rbp
+ adcq %r14, %r13
+ adcq %r11, %r12
+ adcq %r10, %rbx
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdi
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rdi
+ addq %r14, %rdi
+ adcq %r11, %rcx
+ adcq %r10, %r8
+ adcq $0, %rsi
+ addq %rbp, %r9
+ adcq %r13, %rdi
+ adcq %r12, %rcx
+ adcq %rbx, %r8
+ adcq $0, %rsi
+ movq %r9, %rbx
+ imulq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq %r15
+ movq %rdx, %r14
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ addq %r9, %rax
+ adcq %rdi, %rbp
+ adcq %rcx, %r13
+ adcq %r8, %r12
+ adcq $0, %rsi
+ addq %rdx, %rbp
+ adcq %r14, %r13
+ adcq %r11, %r12
+ adcq %r10, %rsi
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdi
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %rcx
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rdi
+ addq %r14, %rdi
+ adcq %r11, %r10
+ adcq %rcx, %r8
+ adcq $0, %rbx
+ addq %rbp, %r9
+ adcq %r13, %rdi
+ adcq %r12, %r10
+ adcq %rsi, %r8
+ adcq $0, %rbx
+ movq -8(%rsp), %rsi # 8-byte Reload
+ imulq %r9, %rsi
+ movq %rsi, %rax
+ movq -56(%rsp), %r12 # 8-byte Reload
+ mulq %r12
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq %rsi, %rax
+ movq -72(%rsp), %r14 # 8-byte Reload
+ mulq %r14
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rsi, %rax
+ movq -64(%rsp), %r11 # 8-byte Reload
+ mulq %r11
+ movq %rdx, %r15
+ movq %rax, %rcx
+ movq %rsi, %rax
+ movq -80(%rsp), %rsi # 8-byte Reload
+ mulq %rsi
+ addq %r9, %rcx
+ adcq %rdi, %rax
+ adcq %r10, %rbp
+ adcq %r8, %r13
+ adcq $0, %rbx
+ addq %r15, %rax
+ adcq %rdx, %rbp
+ adcq -16(%rsp), %r13 # 8-byte Folded Reload
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rax, %rcx
+ subq %r11, %rcx
+ movq %rbp, %rdx
+ sbbq %rsi, %rdx
+ movq %r13, %rdi
+ sbbq %r14, %rdi
+ movq %rbx, %rsi
+ sbbq %r12, %rsi
+ cmovsq %rax, %rcx
+ movq -88(%rsp), %rax # 8-byte Reload
+ movq %rcx, (%rax)
+ cmovsq %rbp, %rdx
+ movq %rdx, 8(%rax)
+ cmovsq %r13, %rdi
+ movq %rdi, 16(%rax)
+ cmovsq %rbx, %rsi
+ movq %rsi, 24(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end54:
+ .size mcl_fp_montNF4L, .Lfunc_end54-mcl_fp_montNF4L
+
+ .globl mcl_fp_montRed4L
+ .align 16, 0x90
+ .type mcl_fp_montRed4L,@function
+mcl_fp_montRed4L: # @mcl_fp_montRed4L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -56(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq (%rcx), %rdi
+ movq %rdi, -48(%rsp) # 8-byte Spill
+ movq (%rsi), %r12
+ movq %r12, %rbx
+ imulq %rax, %rbx
+ movq %rax, %r9
+ movq 24(%rcx), %rdx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r11
+ movq %rdx, %r8
+ movq 16(%rcx), %rbp
+ movq %rbp, -32(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rbp, %r13
+ movq %rax, %r14
+ movq %rdx, %r10
+ movq 8(%rcx), %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rcx, %rbp
+ movq %rdx, %r15
+ movq %rax, %rcx
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdx, %rbx
+ addq %rcx, %rbx
+ adcq %r14, %r15
+ adcq %r11, %r10
+ adcq $0, %r8
+ movq 56(%rsi), %rcx
+ movq 48(%rsi), %rdx
+ addq %r12, %rax
+ movq 40(%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %r10
+ adcq 32(%rsi), %r8
+ adcq $0, %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, %r12
+ adcq $0, %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ movq %rbx, %rsi
+ imulq %r9, %rsi
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq %r13
+ movq %rdx, %r14
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ movq %rsi, %rax
+ movq -48(%rsp), %r13 # 8-byte Reload
+ mulq %r13
+ movq %rdx, %rsi
+ addq %rbp, %rsi
+ adcq %r9, %rcx
+ adcq -72(%rsp), %r14 # 8-byte Folded Reload
+ adcq $0, %r11
+ addq %rbx, %rax
+ adcq %r15, %rsi
+ adcq %r10, %rcx
+ adcq %r8, %r14
+ adcq -64(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r12
+ movq %r12, -64(%rsp) # 8-byte Spill
+ movq -16(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ adcq $0, %rdi
+ movq %rsi, %rbx
+ imulq -40(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %rax
+ movq -8(%rsp), %r12 # 8-byte Reload
+ mulq %r12
+ movq %rdx, %r8
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq %r13
+ movq %rdx, %rbx
+ addq %r9, %rbx
+ adcq -72(%rsp), %r15 # 8-byte Folded Reload
+ adcq -16(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r8
+ addq %rsi, %rax
+ adcq %rcx, %rbx
+ adcq %r14, %r15
+ adcq %r11, %r10
+ adcq -64(%rsp), %r8 # 8-byte Folded Reload
+ adcq $0, %rbp
+ movq %rbp, -16(%rsp) # 8-byte Spill
+ adcq $0, %rdi
+ movq -40(%rsp), %rcx # 8-byte Reload
+ imulq %rbx, %rcx
+ movq %rcx, %rax
+ mulq %r12
+ movq %rdx, %r13
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ movq -32(%rsp), %r14 # 8-byte Reload
+ mulq %r14
+ movq %rdx, %r11
+ movq %rax, %r12
+ movq %rcx, %rax
+ movq %rcx, %r9
+ movq -24(%rsp), %rsi # 8-byte Reload
+ mulq %rsi
+ movq %rdx, %rbp
+ movq %rax, %rcx
+ movq %r9, %rax
+ movq -48(%rsp), %r9 # 8-byte Reload
+ mulq %r9
+ addq %rcx, %rdx
+ adcq %r12, %rbp
+ adcq -40(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbx, %rax
+ adcq %r15, %rdx
+ adcq %r10, %rbp
+ adcq %r8, %r11
+ adcq -16(%rsp), %r13 # 8-byte Folded Reload
+ adcq $0, %rdi
+ movq %rdx, %rax
+ subq %r9, %rax
+ movq %rbp, %rcx
+ sbbq %rsi, %rcx
+ movq %r11, %rbx
+ sbbq %r14, %rbx
+ movq %r13, %rsi
+ sbbq -8(%rsp), %rsi # 8-byte Folded Reload
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %r13, %rsi
+ testb %dil, %dil
+ cmovneq %rdx, %rax
+ movq -56(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rbp, %rcx
+ movq %rcx, 8(%rdx)
+ cmovneq %r11, %rbx
+ movq %rbx, 16(%rdx)
+ movq %rsi, 24(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end55:
+ .size mcl_fp_montRed4L, .Lfunc_end55-mcl_fp_montRed4L
+
+ .globl mcl_fp_addPre4L
+ .align 16, 0x90
+ .type mcl_fp_addPre4L,@function
+mcl_fp_addPre4L: # @mcl_fp_addPre4L
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rdx), %rax
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rax
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ adcq %r8, %r9
+ movq %r9, 24(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end56:
+ .size mcl_fp_addPre4L, .Lfunc_end56-mcl_fp_addPre4L
+
+ .globl mcl_fp_subPre4L
+ .align 16, 0x90
+ .type mcl_fp_subPre4L,@function
+mcl_fp_subPre4L: # @mcl_fp_subPre4L
+# BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r10, 16(%rdi)
+ sbbq %r8, %r9
+ movq %r9, 24(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end57:
+ .size mcl_fp_subPre4L, .Lfunc_end57-mcl_fp_subPre4L
+
+ .globl mcl_fp_shr1_4L
+ .align 16, 0x90
+ .type mcl_fp_shr1_4L,@function
+mcl_fp_shr1_4L: # @mcl_fp_shr1_4L
+# BB#0:
+ movq 24(%rsi), %rax
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rdx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rdx
+ movq %rdx, (%rdi)
+ shrdq $1, %rcx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rax, %rcx
+ movq %rcx, 16(%rdi)
+ shrq %rax
+ movq %rax, 24(%rdi)
+ retq
+.Lfunc_end58:
+ .size mcl_fp_shr1_4L, .Lfunc_end58-mcl_fp_shr1_4L
+
+ .globl mcl_fp_add4L
+ .align 16, 0x90
+ .type mcl_fp_add4L,@function
+mcl_fp_add4L: # @mcl_fp_add4L
+# BB#0:
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %r8
+ movq 16(%rdx), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r9
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r9, 16(%rdi)
+ adcq %r10, %r8
+ movq %r8, 24(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r9
+ sbbq 24(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB59_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r9, 16(%rdi)
+ movq %r8, 24(%rdi)
+.LBB59_2: # %carry
+ retq
+.Lfunc_end59:
+ .size mcl_fp_add4L, .Lfunc_end59-mcl_fp_add4L
+
+ .globl mcl_fp_addNF4L
+ .align 16, 0x90
+ .type mcl_fp_addNF4L,@function
+mcl_fp_addNF4L: # @mcl_fp_addNF4L
+# BB#0:
+ pushq %rbx
+ movq 24(%rdx), %r8
+ movq 16(%rdx), %r9
+ movq (%rdx), %r11
+ movq 8(%rdx), %r10
+ addq (%rsi), %r11
+ adcq 8(%rsi), %r10
+ adcq 16(%rsi), %r9
+ adcq 24(%rsi), %r8
+ movq %r11, %rsi
+ subq (%rcx), %rsi
+ movq %r10, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r9, %rax
+ sbbq 16(%rcx), %rax
+ movq %r8, %rbx
+ sbbq 24(%rcx), %rbx
+ testq %rbx, %rbx
+ cmovsq %r11, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r10, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r9, %rax
+ movq %rax, 16(%rdi)
+ cmovsq %r8, %rbx
+ movq %rbx, 24(%rdi)
+ popq %rbx
+ retq
+.Lfunc_end60:
+ .size mcl_fp_addNF4L, .Lfunc_end60-mcl_fp_addNF4L
+
+ .globl mcl_fp_sub4L
+ .align 16, 0x90
+ .type mcl_fp_sub4L,@function
+mcl_fp_sub4L: # @mcl_fp_sub4L
+# BB#0:
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %r8
+ movq 16(%rsi), %r9
+ movq (%rsi), %rax
+ movq 8(%rsi), %r11
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r9
+ movq %rax, (%rdi)
+ movq %r11, 8(%rdi)
+ movq %r9, 16(%rdi)
+ sbbq %r10, %r8
+ movq %r8, 24(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB61_2
+# BB#1: # %nocarry
+ retq
+.LBB61_2: # %carry
+ movq 24(%rcx), %r10
+ movq 8(%rcx), %rsi
+ movq 16(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r8, %r10
+ movq %r10, 24(%rdi)
+ retq
+.Lfunc_end61:
+ .size mcl_fp_sub4L, .Lfunc_end61-mcl_fp_sub4L
+
+ .globl mcl_fp_subNF4L
+ .align 16, 0x90
+ .type mcl_fp_subNF4L,@function
+mcl_fp_subNF4L: # @mcl_fp_subNF4L
+# BB#0:
+ pushq %rbx
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r8
+ movq (%rsi), %r9
+ movq 8(%rsi), %r10
+ subq (%rdx), %r9
+ sbbq 8(%rdx), %r10
+ sbbq 16(%rdx), %r8
+ sbbq 24(%rdx), %r11
+ movq %r11, %rdx
+ sarq $63, %rdx
+ movq 24(%rcx), %rsi
+ andq %rdx, %rsi
+ movq 16(%rcx), %rax
+ andq %rdx, %rax
+ movq 8(%rcx), %rbx
+ andq %rdx, %rbx
+ andq (%rcx), %rdx
+ addq %r9, %rdx
+ movq %rdx, (%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r8, %rax
+ movq %rax, 16(%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ popq %rbx
+ retq
+.Lfunc_end62:
+ .size mcl_fp_subNF4L, .Lfunc_end62-mcl_fp_subNF4L
+
+ .globl mcl_fpDbl_add4L
+ .align 16, 0x90
+ .type mcl_fpDbl_add4L,@function
+mcl_fpDbl_add4L: # @mcl_fpDbl_add4L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r9
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r10
+ movq 48(%rsi), %r12
+ movq 40(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq 24(%rdx), %r15
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %rsi
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r15, %rbp
+ movq %rbp, 24(%rdi)
+ adcq %r14, %rsi
+ adcq %r11, %r13
+ adcq %r10, %r12
+ adcq %r9, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rsi, %rdx
+ subq (%rcx), %rdx
+ movq %r13, %rbp
+ sbbq 8(%rcx), %rbp
+ movq %r12, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r8, %r9
+ sbbq 24(%rcx), %r9
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rsi, %rdx
+ movq %rdx, 32(%rdi)
+ testb %al, %al
+ cmovneq %r13, %rbp
+ movq %rbp, 40(%rdi)
+ cmovneq %r12, %rbx
+ movq %rbx, 48(%rdi)
+ cmovneq %r8, %r9
+ movq %r9, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end63:
+ .size mcl_fpDbl_add4L, .Lfunc_end63-mcl_fpDbl_add4L
+
+ .globl mcl_fpDbl_sub4L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub4L,@function
+mcl_fpDbl_sub4L: # @mcl_fpDbl_sub4L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r9
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq (%rsi), %rbx
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ movq %rbx, (%rdi)
+ movq 8(%rsi), %rbx
+ sbbq 8(%rdx), %rbx
+ movq %rbx, 8(%rdi)
+ movq 16(%rsi), %rbx
+ sbbq 16(%rdx), %rbx
+ movq %rbx, 16(%rdi)
+ movq 24(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 40(%rdx), %r11
+ movq 32(%rdx), %rdx
+ movq %rbx, 24(%rdi)
+ movq 32(%rsi), %r12
+ sbbq %rdx, %r12
+ movq 48(%rsi), %r14
+ movq 40(%rsi), %r15
+ sbbq %r11, %r15
+ sbbq %r10, %r14
+ sbbq %r9, %r8
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ movq 16(%rcx), %rdx
+ cmoveq %rax, %rdx
+ movq 24(%rcx), %rbx
+ cmoveq %rax, %rbx
+ cmovneq 8(%rcx), %rax
+ addq %r12, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r15, %rax
+ movq %rax, 40(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 48(%rdi)
+ adcq %r8, %rbx
+ movq %rbx, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end64:
+ .size mcl_fpDbl_sub4L, .Lfunc_end64-mcl_fpDbl_sub4L
+
+ .globl mcl_fp_mulUnitPre5L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre5L,@function
+mcl_fp_mulUnitPre5L: # @mcl_fp_mulUnitPre5L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r15
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r12, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r14, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r11, %r15
+ movq %r15, 24(%rdi)
+ adcq %r9, %r10
+ movq %r10, 32(%rdi)
+ adcq $0, %r8
+ movq %r8, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end65:
+ .size mcl_fp_mulUnitPre5L, .Lfunc_end65-mcl_fp_mulUnitPre5L
+
+ .globl mcl_fpDbl_mulPre5L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre5L,@function
+mcl_fpDbl_mulPre5L: # @mcl_fpDbl_mulPre5L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rsi, %r9
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq (%r9), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 8(%r9), %rbx
+ movq %rbx, -48(%rsp) # 8-byte Spill
+ movq (%rdx), %rbp
+ movq %rdx, %r8
+ mulq %rbp
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 16(%r9), %r13
+ movq 24(%r9), %r14
+ movq 32(%r9), %r15
+ movq %rax, (%rdi)
+ movq %r15, %rax
+ mulq %rbp
+ movq %rdx, %r10
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %r14, %rax
+ mulq %rbp
+ movq %rdx, %r12
+ movq %rax, %r11
+ movq %r13, %rax
+ mulq %rbp
+ movq %rdx, %rcx
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rdx, %rbp
+ movq %rax, %rdi
+ addq -32(%rsp), %rdi # 8-byte Folded Reload
+ adcq %rsi, %rbp
+ adcq %r11, %rcx
+ adcq -40(%rsp), %r12 # 8-byte Folded Reload
+ adcq $0, %r10
+ movq 8(%r8), %r11
+ movq %r15, %rax
+ mulq %r11
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ movq %r14, %rax
+ mulq %r11
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %r13, %rax
+ mulq %r11
+ movq %rdx, %r8
+ movq %rax, %r13
+ movq -48(%rsp), %rax # 8-byte Reload
+ mulq %r11
+ movq %rdx, %r14
+ movq %rax, %rbx
+ movq -24(%rsp), %rax # 8-byte Reload
+ mulq %r11
+ addq %rdi, %rax
+ movq -8(%rsp), %rdi # 8-byte Reload
+ movq %rax, 8(%rdi)
+ adcq %rbp, %rbx
+ adcq %rcx, %r13
+ adcq %r12, %r15
+ adcq %r10, %rsi
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %rdx, %rbx
+ adcq %r14, %r13
+ adcq %r8, %r15
+ adcq -40(%rsp), %rsi # 8-byte Folded Reload
+ adcq -32(%rsp), %rcx # 8-byte Folded Reload
+ movq 32(%r9), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq -16(%rsp), %rdi # 8-byte Reload
+ movq 16(%rdi), %r12
+ mulq %r12
+ movq %rax, %r11
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq 24(%r9), %rax
+ movq %rax, -72(%rsp) # 8-byte Spill
+ mulq %r12
+ movq %rax, %r10
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 16(%r9), %rax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ mulq %r12
+ movq %rax, %r8
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq (%r9), %r14
+ movq 8(%r9), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ mulq %r12
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %r14, %rax
+ mulq %r12
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ addq %rbx, %rax
+ movq -8(%rsp), %rbx # 8-byte Reload
+ movq %rax, 16(%rbx)
+ adcq %r13, %rbp
+ adcq %r15, %r8
+ adcq %rsi, %r10
+ adcq %rcx, %r11
+ sbbq %rcx, %rcx
+ movq 24(%rdi), %rsi
+ movq -40(%rsp), %rax # 8-byte Reload
+ mulq %rsi
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq -56(%rsp), %rax # 8-byte Reload
+ mulq %rsi
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %r14, %rax
+ mulq %rsi
+ movq %rdx, %r15
+ movq %rax, %rdi
+ movq -72(%rsp), %rax # 8-byte Reload
+ mulq %rsi
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq -80(%rsp), %rax # 8-byte Reload
+ mulq %rsi
+ andl $1, %ecx
+ addq -88(%rsp), %rbp # 8-byte Folded Reload
+ adcq -64(%rsp), %r8 # 8-byte Folded Reload
+ adcq -48(%rsp), %r10 # 8-byte Folded Reload
+ adcq -32(%rsp), %r11 # 8-byte Folded Reload
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ addq %rdi, %rbp
+ movq %rbp, 24(%rbx)
+ adcq %r12, %r8
+ adcq %rax, %r10
+ adcq %r14, %r11
+ adcq %r13, %rcx
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq %r15, %r8
+ adcq -56(%rsp), %r10 # 8-byte Folded Reload
+ adcq %rdx, %r11
+ adcq -72(%rsp), %rcx # 8-byte Folded Reload
+ adcq -40(%rsp), %rsi # 8-byte Folded Reload
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdi
+ movq %rdi, %rax
+ mulq 32(%r9)
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq 24(%r9)
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq 16(%r9)
+ movq %rdx, %r14
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq 8(%r9)
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq (%r9)
+ addq %r8, %rax
+ movq -8(%rsp), %rdi # 8-byte Reload
+ movq %rax, 32(%rdi)
+ adcq %r10, %rbp
+ adcq %r11, %rbx
+ adcq %rcx, %r13
+ adcq %rsi, %r15
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rbp
+ movq %rbp, 40(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r14, %r13
+ movq %r13, 56(%rdi)
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, 64(%rdi)
+ adcq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end66:
+ .size mcl_fpDbl_mulPre5L, .Lfunc_end66-mcl_fpDbl_mulPre5L
+
+ .globl mcl_fpDbl_sqrPre5L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre5L,@function
+mcl_fpDbl_sqrPre5L: # @mcl_fpDbl_sqrPre5L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq 32(%rsi), %r11
+ movq (%rsi), %r13
+ movq 8(%rsi), %rbx
+ movq %r11, %rax
+ mulq %rbx
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rbp
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rcx
+ movq %rcx, %rax
+ mulq %rbx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %r11, %rax
+ mulq %r13
+ movq %rdx, %r8
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %r13
+ movq %rdx, %r9
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq %r13
+ movq %rdx, %r10
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq %r13
+ movq %rdx, %rbx
+ movq %rax, %r14
+ movq %r13, %rax
+ mulq %r13
+ movq %rax, (%rdi)
+ addq %r14, %rdx
+ adcq %rbx, %r12
+ adcq %rbp, %r10
+ adcq -72(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r8
+ addq %r14, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r15, %r12
+ adcq -56(%rsp), %r10 # 8-byte Folded Reload
+ adcq -48(%rsp), %r9 # 8-byte Folded Reload
+ adcq -40(%rsp), %r8 # 8-byte Folded Reload
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ addq %rbx, %r12
+ adcq -64(%rsp), %r10 # 8-byte Folded Reload
+ adcq -32(%rsp), %r9 # 8-byte Folded Reload
+ adcq -24(%rsp), %r8 # 8-byte Folded Reload
+ adcq -16(%rsp), %rdi # 8-byte Folded Reload
+ movq %r11, %rax
+ mulq %rcx
+ movq %rax, %r11
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rbx
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rax, %r14
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq (%rsi), %rbp
+ movq %rbp, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rax, %r13
+ addq %r12, %rbp
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq %rbp, 16(%rax)
+ adcq %r10, %r15
+ adcq %r9, %r13
+ adcq %r8, %r14
+ adcq %rdi, %r11
+ sbbq %r10, %r10
+ andl $1, %r10d
+ addq -56(%rsp), %r15 # 8-byte Folded Reload
+ adcq -48(%rsp), %r13 # 8-byte Folded Reload
+ adcq %rdx, %r14
+ adcq -24(%rsp), %r11 # 8-byte Folded Reload
+ adcq -16(%rsp), %r10 # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq -32(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rax, %rdi
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rbp
+ movq %rbp, -16(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rcx
+ movq %rcx, %rax
+ mulq %rbx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %rbp
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rax, %rbx
+ addq %r15, %rdi
+ movq -8(%rsp), %r15 # 8-byte Reload
+ movq %rdi, 24(%r15)
+ adcq %r13, %r8
+ adcq %r14, %r12
+ adcq %r11, %rbx
+ adcq %r10, %r9
+ sbbq %r10, %r10
+ andl $1, %r10d
+ addq -40(%rsp), %r8 # 8-byte Folded Reload
+ adcq -24(%rsp), %r12 # 8-byte Folded Reload
+ adcq %rbp, %rbx
+ adcq %rdx, %r9
+ adcq -32(%rsp), %r10 # 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r14
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r11
+ movq -16(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ addq %r8, %rsi
+ movq %rsi, 32(%r15)
+ adcq %r12, %rdi
+ adcq %rbx, %rax
+ adcq %r9, %rbp
+ adcq %r10, %r11
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r13, %rdi
+ movq %rdi, 40(%r15)
+ adcq %r14, %rax
+ movq %rax, 48(%r15)
+ adcq %rdx, %rbp
+ movq %rbp, 56(%r15)
+ adcq -24(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 64(%r15)
+ adcq -32(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 72(%r15)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end67:
+ .size mcl_fpDbl_sqrPre5L, .Lfunc_end67-mcl_fpDbl_sqrPre5L
+
+ .globl mcl_fp_mont5L
+ .align 16, 0x90
+ .type mcl_fp_mont5L,@function
+mcl_fp_mont5L: # @mcl_fp_mont5L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ pushq %rax
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq (%rdx), %rdi
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, %r14
+ movq 24(%rsi), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rax, %r9
+ movq %rdx, %r12
+ movq 16(%rsi), %rax
+ movq %rax, -72(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rax, %r10
+ movq %rdx, %rbp
+ movq (%rsi), %rbx
+ movq %rbx, -80(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rdx, %r11
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rdx, %r15
+ addq %rsi, %r15
+ adcq %r10, %r11
+ adcq %r9, %rbp
+ movq %rbp, -96(%rsp) # 8-byte Spill
+ adcq %r8, %r12
+ movq %r12, -112(%rsp) # 8-byte Spill
+ adcq $0, %r14
+ movq %r14, -120(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ imulq %rdx, %rbp
+ movq (%rcx), %r9
+ movq %r9, -32(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rdx
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq 24(%rcx), %rsi
+ movq %rsi, -8(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rbx
+ movq %rbx, -16(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rdx, %r14
+ movq %rax, %r13
+ movq %rbp, %rax
+ mulq %rsi
+ movq %rdx, %rdi
+ movq %rax, %r10
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, %rsi
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq %r9
+ movq %rdx, %rbp
+ addq %r12, %rbp
+ adcq %r8, %rsi
+ adcq %r10, %rbx
+ adcq %r13, %rdi
+ adcq $0, %r14
+ addq -128(%rsp), %rax # 8-byte Folded Reload
+ adcq %r15, %rbp
+ adcq %r11, %rsi
+ adcq -96(%rsp), %rbx # 8-byte Folded Reload
+ adcq -112(%rsp), %rdi # 8-byte Folded Reload
+ adcq -120(%rsp), %r14 # 8-byte Folded Reload
+ sbbq %r9, %r9
+ andl $1, %r9d
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rcx
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %r15
+ movq %rdx, %rcx
+ addq %r10, %rcx
+ adcq -120(%rsp), %r8 # 8-byte Folded Reload
+ adcq -112(%rsp), %r12 # 8-byte Folded Reload
+ adcq -96(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbp, %r15
+ adcq %rsi, %rcx
+ adcq %rbx, %r8
+ adcq %rdi, %r12
+ adcq %r14, %r11
+ adcq %r9, %r13
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %r15, %rsi
+ imulq -40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ addq %rdi, %rbx
+ adcq -128(%rsp), %r10 # 8-byte Folded Reload
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq -112(%rsp), %rbp # 8-byte Folded Reload
+ adcq $0, %r14
+ addq %r15, %rax
+ adcq %rcx, %rbx
+ adcq %r8, %r10
+ adcq %r12, %r9
+ adcq %r11, %rbp
+ adcq %r13, %r14
+ adcq $0, -96(%rsp) # 8-byte Folded Spill
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rcx
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %r12
+ movq %rdx, %r15
+ addq %r8, %r15
+ adcq -128(%rsp), %rdi # 8-byte Folded Reload
+ adcq -120(%rsp), %rsi # 8-byte Folded Reload
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbx, %r12
+ adcq %r10, %r15
+ adcq %r9, %rdi
+ adcq %rbp, %rsi
+ adcq %r14, %r11
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %r12, %rbp
+ imulq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r10
+ movq %rbp, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ addq %r14, %rbp
+ adcq %r10, %rbx
+ adcq -120(%rsp), %rcx # 8-byte Folded Reload
+ adcq -112(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r8
+ addq %r12, %rax
+ adcq %r15, %rbp
+ adcq %rdi, %rbx
+ adcq %rsi, %rcx
+ adcq %r11, %r9
+ adcq %r13, %r8
+ adcq $0, -96(%rsp) # 8-byte Folded Spill
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rsi
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %r14
+ movq %rdx, %rsi
+ addq %r12, %rsi
+ adcq %r15, %rdi
+ adcq -120(%rsp), %r11 # 8-byte Folded Reload
+ adcq -112(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbp, %r14
+ adcq %rbx, %rsi
+ adcq %rcx, %rdi
+ adcq %r9, %r11
+ adcq %r8, %r10
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %r14, %rbp
+ imulq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ addq %r12, %rbp
+ adcq %r8, %rbx
+ adcq -120(%rsp), %rcx # 8-byte Folded Reload
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ adcq $0, %r9
+ addq %r14, %rax
+ adcq %rsi, %rbp
+ adcq %rdi, %rbx
+ adcq %r11, %rcx
+ adcq %r10, %r15
+ adcq %r13, %r9
+ movq -96(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ movq -48(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rsi
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r8
+ addq %rdi, %r8
+ adcq -72(%rsp), %r12 # 8-byte Folded Reload
+ adcq -64(%rsp), %r11 # 8-byte Folded Reload
+ adcq -56(%rsp), %r13 # 8-byte Folded Reload
+ movq -48(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rbp, %r10
+ adcq %rbx, %r8
+ adcq %rcx, %r12
+ adcq %r15, %r11
+ adcq %r9, %r13
+ adcq %r14, %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ sbbq %rcx, %rcx
+ movq -40(%rsp), %rsi # 8-byte Reload
+ imulq %r10, %rsi
+ movq %rsi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ addq %r9, %rdx
+ adcq %r15, %rdi
+ adcq -56(%rsp), %rbp # 8-byte Folded Reload
+ adcq -40(%rsp), %rbx # 8-byte Folded Reload
+ adcq $0, %r14
+ andl $1, %ecx
+ addq %r10, %rax
+ adcq %r8, %rdx
+ adcq %r12, %rdi
+ adcq %r11, %rbp
+ adcq %r13, %rbx
+ adcq -48(%rsp), %r14 # 8-byte Folded Reload
+ adcq $0, %rcx
+ movq %rdx, %rax
+ subq -32(%rsp), %rax # 8-byte Folded Reload
+ movq %rdi, %r8
+ sbbq -24(%rsp), %r8 # 8-byte Folded Reload
+ movq %rbp, %r9
+ sbbq -16(%rsp), %r9 # 8-byte Folded Reload
+ movq %rbx, %r10
+ sbbq -8(%rsp), %r10 # 8-byte Folded Reload
+ movq %r14, %r11
+ sbbq (%rsp), %r11 # 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rbx, %r10
+ testb %cl, %cl
+ cmovneq %rdx, %rax
+ movq -104(%rsp), %rcx # 8-byte Reload
+ movq %rax, (%rcx)
+ cmovneq %rdi, %r8
+ movq %r8, 8(%rcx)
+ cmovneq %rbp, %r9
+ movq %r9, 16(%rcx)
+ movq %r10, 24(%rcx)
+ cmovneq %r14, %r11
+ movq %r11, 32(%rcx)
+ addq $8, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end68:
+ .size mcl_fp_mont5L, .Lfunc_end68-mcl_fp_mont5L
+
+ .globl mcl_fp_montNF5L
+ .align 16, 0x90
+ .type mcl_fp_montNF5L,@function
+mcl_fp_montNF5L: # @mcl_fp_montNF5L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq (%rdx), %rbx
+ mulq %rbx
+ movq %rax, %r15
+ movq %rdx, %r10
+ movq 24(%rsi), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rax, %r13
+ movq %rdx, %r14
+ movq 16(%rsi), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rax, %r8
+ movq %rdx, %r9
+ movq (%rsi), %rbp
+ movq %rbp, -80(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rdx, %r11
+ movq %rax, %rdi
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdx, %r12
+ addq %rdi, %r12
+ adcq %r8, %r11
+ adcq %r13, %r9
+ adcq %r15, %r14
+ adcq $0, %r10
+ movq -8(%rcx), %rdx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ imulq %rdx, %rsi
+ movq (%rcx), %r8
+ movq %r8, -96(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rdx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rdi
+ movq %rdi, -16(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rbx
+ movq %rbx, -24(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rbp
+ movq %rbp, -72(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %rcx
+ movq %rsi, %rax
+ mulq %rdi
+ movq %rdx, -128(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, %r15
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %r13
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq %r8
+ addq -112(%rsp), %rax # 8-byte Folded Reload
+ adcq %r12, %rbp
+ adcq %r11, %rbx
+ adcq %r9, %rdi
+ adcq %r14, %rcx
+ adcq $0, %r10
+ addq %rdx, %rbp
+ adcq %r13, %rbx
+ adcq %r15, %rdi
+ adcq -128(%rsp), %rcx # 8-byte Folded Reload
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rsi
+ movq %rsi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %r8
+ movq %rsi, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r14
+ movq %rsi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %rsi
+ movq %rdx, %r12
+ addq %r14, %r12
+ adcq %r8, %r11
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbp, %rsi
+ adcq %rbx, %r12
+ adcq %rdi, %r11
+ adcq %rcx, %r9
+ adcq %r10, %r15
+ adcq $0, %r13
+ movq %rsi, %rdi
+ imulq -32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r8
+ movq %rdi, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -96(%rsp) # 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %r12, %r10
+ adcq %r11, %r8
+ adcq %r9, %r14
+ adcq %r15, %rbp
+ adcq $0, %r13
+ addq %rdx, %r10
+ adcq %rbx, %r8
+ adcq %rcx, %r14
+ adcq -120(%rsp), %rbp # 8-byte Folded Reload
+ adcq -112(%rsp), %r13 # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rsi
+ movq %rsi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %r11
+ movq %rdx, %rsi
+ addq %r12, %rsi
+ adcq %rbx, %rcx
+ adcq -120(%rsp), %rdi # 8-byte Folded Reload
+ adcq -112(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r15
+ addq %r10, %r11
+ adcq %r8, %rsi
+ adcq %r14, %rcx
+ adcq %rbp, %rdi
+ adcq %r13, %r9
+ adcq $0, %r15
+ movq %r11, %rbx
+ imulq -32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %r10
+ movq %rbx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq -96(%rsp) # 8-byte Folded Reload
+ addq %r11, %rax
+ adcq %rsi, %rbp
+ adcq %rcx, %r10
+ adcq %rdi, %r8
+ adcq %r9, %r13
+ adcq $0, %r15
+ addq %rdx, %rbp
+ adcq %r12, %r10
+ adcq %r14, %r8
+ adcq -120(%rsp), %r13 # 8-byte Folded Reload
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rsi
+ movq %rsi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %r14
+ movq %rdx, %rsi
+ addq %r12, %rsi
+ adcq %rbx, %rcx
+ adcq -120(%rsp), %rdi # 8-byte Folded Reload
+ adcq -112(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r11
+ addq %rbp, %r14
+ adcq %r10, %rsi
+ adcq %r8, %rcx
+ adcq %r13, %rdi
+ adcq %r15, %r9
+ adcq $0, %r11
+ movq %r14, %rbx
+ imulq -32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r10
+ movq %rbx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq -96(%rsp) # 8-byte Folded Reload
+ addq %r14, %rax
+ adcq %rsi, %rbp
+ adcq %rcx, %r10
+ adcq %rdi, %r8
+ adcq %r9, %r13
+ adcq $0, %r11
+ addq %rdx, %rbp
+ adcq %r12, %r10
+ adcq %r15, %r8
+ adcq -120(%rsp), %r13 # 8-byte Folded Reload
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rcx
+ movq %rcx, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq -80(%rsp) # 8-byte Folded Reload
+ movq %rax, %r12
+ movq %rdx, %rdi
+ addq %rsi, %rdi
+ adcq -56(%rsp), %r15 # 8-byte Folded Reload
+ adcq -48(%rsp), %r14 # 8-byte Folded Reload
+ adcq -40(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %rbx
+ addq %rbp, %r12
+ adcq %r10, %rdi
+ adcq %r8, %r15
+ adcq %r13, %r14
+ adcq %r11, %r9
+ adcq $0, %rbx
+ movq -32(%rsp), %r8 # 8-byte Reload
+ imulq %r12, %r8
+ movq %r8, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %rcx
+ movq %r8, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %r8, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ movq %r8, %rax
+ movq %r8, %r13
+ movq -96(%rsp), %r10 # 8-byte Reload
+ mulq %r10
+ movq %rdx, %r11
+ movq %rax, %r8
+ movq %r13, %rax
+ movq -72(%rsp), %r13 # 8-byte Reload
+ mulq %r13
+ addq %r12, %r8
+ adcq %rdi, %rax
+ adcq %r15, %rsi
+ adcq %r14, %rbp
+ adcq %r9, %rcx
+ adcq $0, %rbx
+ addq %r11, %rax
+ adcq %rdx, %rsi
+ adcq -48(%rsp), %rbp # 8-byte Folded Reload
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ adcq -32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rax, %r11
+ subq %r10, %r11
+ movq %rsi, %r10
+ sbbq %r13, %r10
+ movq %rbp, %r8
+ sbbq -24(%rsp), %r8 # 8-byte Folded Reload
+ movq %rcx, %r9
+ sbbq -16(%rsp), %r9 # 8-byte Folded Reload
+ movq %rbx, %rdx
+ sbbq -8(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, %rdi
+ sarq $63, %rdi
+ cmovsq %rax, %r11
+ movq -104(%rsp), %rax # 8-byte Reload
+ movq %r11, (%rax)
+ cmovsq %rsi, %r10
+ movq %r10, 8(%rax)
+ cmovsq %rbp, %r8
+ movq %r8, 16(%rax)
+ cmovsq %rcx, %r9
+ movq %r9, 24(%rax)
+ cmovsq %rbx, %rdx
+ movq %rdx, 32(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end69:
+ .size mcl_fp_montNF5L, .Lfunc_end69-mcl_fp_montNF5L
+
+ .globl mcl_fp_montRed5L
+ .align 16, 0x90
+ .type mcl_fp_montRed5L,@function
+mcl_fp_montRed5L: # @mcl_fp_montRed5L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq (%rcx), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ movq (%rsi), %r9
+ movq %r9, %rbp
+ imulq %rax, %rbp
+ movq 32(%rcx), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r10
+ movq %rdx, %r13
+ movq 24(%rcx), %rdx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r14
+ movq %rdx, %r11
+ movq 16(%rcx), %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rdx, %r15
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, %r8
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rdx, %rcx
+ addq %rbx, %rcx
+ adcq %r12, %r8
+ adcq %r14, %r15
+ adcq %r10, %r11
+ adcq $0, %r13
+ addq %r9, %rax
+ movq 72(%rsi), %rax
+ movq 64(%rsi), %rdx
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r8
+ adcq 24(%rsi), %r15
+ adcq 32(%rsi), %r11
+ adcq 40(%rsi), %r13
+ movq %r13, -88(%rsp) # 8-byte Spill
+ movq 56(%rsi), %rdi
+ movq 48(%rsi), %rsi
+ adcq $0, %rsi
+ movq %rsi, -96(%rsp) # 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -72(%rsp) # 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ movq %rcx, %rsi
+ movq -64(%rsp), %r9 # 8-byte Reload
+ imulq %r9, %rsi
+ movq %rsi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ addq %rbp, %rsi
+ adcq %rbx, %r13
+ adcq -112(%rsp), %r12 # 8-byte Folded Reload
+ adcq -104(%rsp), %r14 # 8-byte Folded Reload
+ adcq $0, %r10
+ addq %rcx, %rax
+ adcq %r8, %rsi
+ adcq %r15, %r13
+ adcq %r11, %r12
+ adcq -88(%rsp), %r14 # 8-byte Folded Reload
+ adcq -96(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, -72(%rsp) # 8-byte Folded Spill
+ adcq $0, -56(%rsp) # 8-byte Folded Spill
+ adcq $0, -40(%rsp) # 8-byte Folded Spill
+ adcq $0, %rdi
+ movq %rsi, %rcx
+ imulq %r9, %rcx
+ movq %rcx, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ addq %r8, %rbp
+ adcq -104(%rsp), %rbx # 8-byte Folded Reload
+ adcq -96(%rsp), %r15 # 8-byte Folded Reload
+ adcq -88(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r9
+ addq %rsi, %rax
+ adcq %r13, %rbp
+ adcq %r12, %rbx
+ adcq %r14, %r15
+ adcq %r10, %r11
+ adcq -72(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, -56(%rsp) # 8-byte Folded Spill
+ adcq $0, -40(%rsp) # 8-byte Folded Spill
+ adcq $0, %rdi
+ movq %rbp, %rcx
+ imulq -64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, %rax
+ movq -48(%rsp), %rsi # 8-byte Reload
+ mulq %rsi
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ addq %r8, %rcx
+ adcq %r10, %r13
+ adcq -96(%rsp), %r12 # 8-byte Folded Reload
+ adcq -88(%rsp), %r14 # 8-byte Folded Reload
+ movq -72(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %rbp, %rax
+ adcq %rbx, %rcx
+ adcq %r15, %r13
+ adcq %r11, %r12
+ adcq %r9, %r14
+ adcq -56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ adcq $0, -40(%rsp) # 8-byte Folded Spill
+ adcq $0, %rdi
+ movq -64(%rsp), %rbx # 8-byte Reload
+ imulq %rcx, %rbx
+ movq %rbx, %rax
+ mulq %rsi
+ movq %rdx, %rsi
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r15
+ movq %rbx, %rax
+ movq %rbx, %r10
+ movq -32(%rsp), %r11 # 8-byte Reload
+ mulq %r11
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %r10, %rax
+ movq -24(%rsp), %r10 # 8-byte Reload
+ mulq %r10
+ addq %r8, %rdx
+ adcq %r15, %rbx
+ adcq -64(%rsp), %rbp # 8-byte Folded Reload
+ adcq -56(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %rsi
+ addq %rcx, %rax
+ adcq %r13, %rdx
+ adcq %r12, %rbx
+ adcq %r14, %rbp
+ adcq -72(%rsp), %r9 # 8-byte Folded Reload
+ adcq -40(%rsp), %rsi # 8-byte Folded Reload
+ adcq $0, %rdi
+ movq %rdx, %rax
+ subq %r10, %rax
+ movq %rbx, %rcx
+ sbbq %r11, %rcx
+ movq %rbp, %r8
+ sbbq -16(%rsp), %r8 # 8-byte Folded Reload
+ movq %r9, %r10
+ sbbq -8(%rsp), %r10 # 8-byte Folded Reload
+ movq %rsi, %r11
+ sbbq -48(%rsp), %r11 # 8-byte Folded Reload
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %rsi, %r11
+ testb %dil, %dil
+ cmovneq %rdx, %rax
+ movq -80(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rbx, %rcx
+ movq %rcx, 8(%rdx)
+ cmovneq %rbp, %r8
+ movq %r8, 16(%rdx)
+ cmovneq %r9, %r10
+ movq %r10, 24(%rdx)
+ movq %r11, 32(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end70:
+ .size mcl_fp_montRed5L, .Lfunc_end70-mcl_fp_montRed5L
+
+ .globl mcl_fp_addPre5L
+ .align 16, 0x90
+ .type mcl_fp_addPre5L,@function
+mcl_fp_addPre5L: # @mcl_fp_addPre5L
+# BB#0:
+ movq 32(%rdx), %r8
+ movq 24(%rdx), %r9
+ movq 24(%rsi), %r11
+ movq 32(%rsi), %r10
+ movq 16(%rdx), %rcx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rcx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ adcq %r9, %r11
+ movq %r11, 24(%rdi)
+ adcq %r8, %r10
+ movq %r10, 32(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end71:
+ .size mcl_fp_addPre5L, .Lfunc_end71-mcl_fp_addPre5L
+
+ .globl mcl_fp_subPre5L
+ .align 16, 0x90
+ .type mcl_fp_subPre5L,@function
+mcl_fp_subPre5L: # @mcl_fp_subPre5L
+# BB#0:
+ pushq %rbx
+ movq 32(%rsi), %r10
+ movq 24(%rdx), %r8
+ movq 32(%rdx), %r9
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %rcx
+ movq %rbx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r8, %r11
+ movq %r11, 24(%rdi)
+ sbbq %r9, %r10
+ movq %r10, 32(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ retq
+.Lfunc_end72:
+ .size mcl_fp_subPre5L, .Lfunc_end72-mcl_fp_subPre5L
+
+ .globl mcl_fp_shr1_5L
+ .align 16, 0x90
+ .type mcl_fp_shr1_5L,@function
+mcl_fp_shr1_5L: # @mcl_fp_shr1_5L
+# BB#0:
+ movq 32(%rsi), %r8
+ movq 24(%rsi), %rcx
+ movq 16(%rsi), %rdx
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rax
+ movq %rax, (%rdi)
+ shrdq $1, %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 16(%rdi)
+ shrdq $1, %r8, %rcx
+ movq %rcx, 24(%rdi)
+ shrq %r8
+ movq %r8, 32(%rdi)
+ retq
+.Lfunc_end73:
+ .size mcl_fp_shr1_5L, .Lfunc_end73-mcl_fp_shr1_5L
+
+ .globl mcl_fp_add5L
+ .align 16, 0x90
+ .type mcl_fp_add5L,@function
+mcl_fp_add5L: # @mcl_fp_add5L
+# BB#0:
+ pushq %rbx
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %rbx
+ movq 24(%rsi), %r9
+ movq 32(%rsi), %r8
+ movq 16(%rdx), %r10
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r10
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ adcq %rbx, %r9
+ movq %r9, 24(%rdi)
+ adcq %r11, %r8
+ movq %r8, 32(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r10
+ sbbq 24(%rcx), %r9
+ sbbq 32(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB74_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %r9, 24(%rdi)
+ movq %r8, 32(%rdi)
+.LBB74_2: # %carry
+ popq %rbx
+ retq
+.Lfunc_end74:
+ .size mcl_fp_add5L, .Lfunc_end74-mcl_fp_add5L
+
+ .globl mcl_fp_addNF5L
+ .align 16, 0x90
+ .type mcl_fp_addNF5L,@function
+mcl_fp_addNF5L: # @mcl_fp_addNF5L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 32(%rdx), %r8
+ movq 24(%rdx), %r9
+ movq 16(%rdx), %r10
+ movq (%rdx), %r14
+ movq 8(%rdx), %r11
+ addq (%rsi), %r14
+ adcq 8(%rsi), %r11
+ adcq 16(%rsi), %r10
+ adcq 24(%rsi), %r9
+ adcq 32(%rsi), %r8
+ movq %r14, %rsi
+ subq (%rcx), %rsi
+ movq %r11, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r10, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r9, %r15
+ sbbq 24(%rcx), %r15
+ movq %r8, %rax
+ sbbq 32(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r14, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ cmovsq %r9, %r15
+ movq %r15, 24(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 32(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end75:
+ .size mcl_fp_addNF5L, .Lfunc_end75-mcl_fp_addNF5L
+
+ .globl mcl_fp_sub5L
+ .align 16, 0x90
+ .type mcl_fp_sub5L,@function
+mcl_fp_sub5L: # @mcl_fp_sub5L
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 32(%rsi), %r8
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r10, 16(%rdi)
+ sbbq %r11, %r9
+ movq %r9, 24(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 32(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB76_2
+# BB#1: # %carry
+ movq 32(%rcx), %r11
+ movq 24(%rcx), %r14
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rbx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %rsi, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r9, %r14
+ movq %r14, 24(%rdi)
+ adcq %r8, %r11
+ movq %r11, 32(%rdi)
+.LBB76_2: # %nocarry
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end76:
+ .size mcl_fp_sub5L, .Lfunc_end76-mcl_fp_sub5L
+
+ .globl mcl_fp_subNF5L
+ .align 16, 0x90
+ .type mcl_fp_subNF5L,@function
+mcl_fp_subNF5L: # @mcl_fp_subNF5L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 32(%rsi), %r14
+ movq 24(%rsi), %r8
+ movq 16(%rsi), %r9
+ movq (%rsi), %r10
+ movq 8(%rsi), %r11
+ subq (%rdx), %r10
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r9
+ sbbq 24(%rdx), %r8
+ sbbq 32(%rdx), %r14
+ movq %r14, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rsi
+ shldq $1, %r14, %rsi
+ movq 8(%rcx), %rbx
+ andq %rsi, %rbx
+ andq (%rcx), %rsi
+ movq 32(%rcx), %r15
+ andq %rdx, %r15
+ movq 24(%rcx), %rax
+ andq %rdx, %rax
+ rolq %rdx
+ andq 16(%rcx), %rdx
+ addq %r10, %rsi
+ movq %rsi, (%rdi)
+ adcq %r11, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r8, %rax
+ movq %rax, 24(%rdi)
+ adcq %r14, %r15
+ movq %r15, 32(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end77:
+ .size mcl_fp_subNF5L, .Lfunc_end77-mcl_fp_subNF5L
+
+ .globl mcl_fpDbl_add5L
+ .align 16, 0x90
+ .type mcl_fpDbl_add5L,@function
+mcl_fpDbl_add5L: # @mcl_fpDbl_add5L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 72(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 64(%rdx), %r11
+ movq 56(%rdx), %r14
+ movq 48(%rdx), %r15
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %r13
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %rbp
+ adcq 32(%rdx), %r13
+ movq 40(%rdx), %r9
+ movq %rbx, (%rdi)
+ movq 72(%rsi), %r8
+ movq %rax, 8(%rdi)
+ movq 64(%rsi), %r10
+ movq %r12, 16(%rdi)
+ movq 56(%rsi), %r12
+ movq %rbp, 24(%rdi)
+ movq 48(%rsi), %rbp
+ movq 40(%rsi), %rbx
+ movq %r13, 32(%rdi)
+ adcq %r9, %rbx
+ adcq %r15, %rbp
+ adcq %r14, %r12
+ adcq %r11, %r10
+ adcq -8(%rsp), %r8 # 8-byte Folded Reload
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rbx, %rax
+ subq (%rcx), %rax
+ movq %rbp, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r12, %r9
+ sbbq 16(%rcx), %r9
+ movq %r10, %r11
+ sbbq 24(%rcx), %r11
+ movq %r8, %r14
+ sbbq 32(%rcx), %r14
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rbx, %rax
+ movq %rax, 40(%rdi)
+ testb %sil, %sil
+ cmovneq %rbp, %rdx
+ movq %rdx, 48(%rdi)
+ cmovneq %r12, %r9
+ movq %r9, 56(%rdi)
+ cmovneq %r10, %r11
+ movq %r11, 64(%rdi)
+ cmovneq %r8, %r14
+ movq %r14, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end78:
+ .size mcl_fpDbl_add5L, .Lfunc_end78-mcl_fpDbl_add5L
+
+ .globl mcl_fpDbl_sub5L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub5L,@function
+mcl_fpDbl_sub5L: # @mcl_fpDbl_sub5L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 72(%rdx), %r9
+ movq 64(%rdx), %r10
+ movq 56(%rdx), %r14
+ movq 16(%rsi), %r8
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %eax, %eax
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r8
+ movq 24(%rsi), %r12
+ sbbq 24(%rdx), %r12
+ movq %r15, (%rdi)
+ movq 32(%rsi), %rbx
+ sbbq 32(%rdx), %rbx
+ movq %r11, 8(%rdi)
+ movq 48(%rdx), %r15
+ movq 40(%rdx), %rdx
+ movq %r8, 16(%rdi)
+ movq 72(%rsi), %r8
+ movq %r12, 24(%rdi)
+ movq 64(%rsi), %r11
+ movq %rbx, 32(%rdi)
+ movq 40(%rsi), %rbp
+ sbbq %rdx, %rbp
+ movq 56(%rsi), %r12
+ movq 48(%rsi), %r13
+ sbbq %r15, %r13
+ sbbq %r14, %r12
+ sbbq %r10, %r11
+ sbbq %r9, %r8
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ movq 16(%rcx), %rdx
+ cmoveq %rax, %rdx
+ movq 8(%rcx), %rbx
+ cmoveq %rax, %rbx
+ movq 32(%rcx), %r9
+ cmoveq %rax, %r9
+ cmovneq 24(%rcx), %rax
+ addq %rbp, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r13, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r12, %rdx
+ movq %rdx, 56(%rdi)
+ adcq %r11, %rax
+ movq %rax, 64(%rdi)
+ adcq %r8, %r9
+ movq %r9, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end79:
+ .size mcl_fpDbl_sub5L, .Lfunc_end79-mcl_fpDbl_sub5L
+
+ .globl mcl_fp_mulUnitPre6L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre6L,@function
+mcl_fp_mulUnitPre6L: # @mcl_fp_mulUnitPre6L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 40(%rsi)
+ movq %rdx, %r9
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r15
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r13
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %rbp, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r14, %r13
+ movq %r13, 24(%rdi)
+ adcq %r11, %r15
+ movq %r15, 32(%rdi)
+ adcq %r8, %r10
+ movq %r10, 40(%rdi)
+ adcq $0, %r9
+ movq %r9, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end80:
+ .size mcl_fp_mulUnitPre6L, .Lfunc_end80-mcl_fp_mulUnitPre6L
+
+ .globl mcl_fpDbl_mulPre6L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre6L,@function
+mcl_fpDbl_mulPre6L: # @mcl_fpDbl_mulPre6L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rsi, %r8
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq (%r8), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 8(%r8), %r13
+ movq %r13, -72(%rsp) # 8-byte Spill
+ movq (%rdx), %rbx
+ mulq %rbx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 16(%r8), %rbp
+ movq %rbp, -64(%rsp) # 8-byte Spill
+ movq 24(%r8), %rsi
+ movq %rsi, -48(%rsp) # 8-byte Spill
+ movq 32(%r8), %r10
+ movq 40(%r8), %r11
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %rcx
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %r10, %rax
+ mulq %rbx
+ movq %rdx, %r12
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, %r9
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %rbp
+ movq %rax, %r15
+ movq %r13, %rax
+ mulq %rbx
+ movq %rdx, %r13
+ movq %rax, %rsi
+ addq -32(%rsp), %rsi # 8-byte Folded Reload
+ adcq %r15, %r13
+ adcq %r14, %rbp
+ adcq %rdi, %r9
+ adcq -40(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, %rdi
+ adcq $0, %rcx
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ movq -16(%rsp), %r15 # 8-byte Reload
+ movq 8(%r15), %rcx
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r11
+ movq %r10, %rax
+ mulq %rcx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq -48(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq -64(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %rbx
+ movq -72(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq -24(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ addq %rsi, %rax
+ movq -8(%rsp), %rcx # 8-byte Reload
+ movq %rax, 8(%rcx)
+ adcq %r13, %r10
+ adcq %rbp, %rbx
+ adcq %r9, %r14
+ adcq %rdi, %r12
+ adcq -56(%rsp), %r11 # 8-byte Folded Reload
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ addq %rdx, %r10
+ adcq -72(%rsp), %rbx # 8-byte Folded Reload
+ adcq -64(%rsp), %r14 # 8-byte Folded Reload
+ adcq -48(%rsp), %r12 # 8-byte Folded Reload
+ adcq -40(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, -96(%rsp) # 8-byte Spill
+ adcq -32(%rsp), %rdi # 8-byte Folded Reload
+ movq 40(%r8), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 16(%r15), %rcx
+ mulq %rcx
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 32(%r8), %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, %r15
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq 24(%r8), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, %r11
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq 16(%r8), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, %rbp
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq (%r8), %rsi
+ movq %rsi, -72(%rsp) # 8-byte Spill
+ movq 8(%r8), %rax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rdx, %r13
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq %rcx
+ addq %r10, %rax
+ movq -8(%rsp), %r10 # 8-byte Reload
+ movq %rax, 16(%r10)
+ adcq %rbx, %r9
+ adcq %r14, %rbp
+ adcq %r12, %r11
+ adcq -96(%rsp), %r15 # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ adcq %rdi, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %rdx, %r9
+ adcq %r13, %rbp
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ adcq -104(%rsp), %r15 # 8-byte Folded Reload
+ adcq -88(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -40(%rsp) # 8-byte Spill
+ adcq -32(%rsp), %rcx # 8-byte Folded Reload
+ movq -16(%rsp), %rdi # 8-byte Reload
+ movq 24(%rdi), %rbx
+ movq -24(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq -48(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq -56(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq -64(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq -80(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, %rsi
+ movq %rax, %r13
+ movq -72(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ addq %r9, %rax
+ movq %rax, 24(%r10)
+ adcq %rbp, %r13
+ adcq %r11, %r12
+ adcq %r15, %r14
+ movq -24(%rsp), %rbp # 8-byte Reload
+ adcq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq -32(%rsp), %rax # 8-byte Reload
+ adcq %rcx, %rax
+ sbbq %r10, %r10
+ andl $1, %r10d
+ addq %rdx, %r13
+ adcq %rsi, %r12
+ adcq -64(%rsp), %r14 # 8-byte Folded Reload
+ adcq -56(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, -24(%rsp) # 8-byte Spill
+ adcq -48(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -32(%rsp) # 8-byte Spill
+ adcq -88(%rsp), %r10 # 8-byte Folded Reload
+ movq 40(%r8), %rax
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq 32(%rdi), %rcx
+ movq 32(%r8), %rbx
+ movq %rbx, -112(%rsp) # 8-byte Spill
+ movq 24(%r8), %rsi
+ movq %rsi, -64(%rsp) # 8-byte Spill
+ movq 16(%r8), %rdi
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq (%r8), %r15
+ movq 8(%r8), %r9
+ mulq %rcx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq %rsi, %rax
+ mulq %rcx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %r9, %rax
+ mulq %rcx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %r15, %rax
+ mulq %rcx
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ addq %r13, %rax
+ movq -8(%rsp), %r13 # 8-byte Reload
+ movq %rax, 32(%r13)
+ adcq %r12, %rbp
+ adcq %r14, %rdi
+ adcq -24(%rsp), %rbx # 8-byte Folded Reload
+ adcq -32(%rsp), %r8 # 8-byte Folded Reload
+ adcq %r10, %r11
+ movq -16(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rcx
+ sbbq %rsi, %rsi
+ movq -72(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq -112(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq %r9, %rax
+ mulq %rcx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq %r15, %rax
+ mulq %rcx
+ movq %rdx, %r12
+ movq %rax, %r9
+ movq -64(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq -104(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ andl $1, %esi
+ addq -96(%rsp), %rbp # 8-byte Folded Reload
+ adcq -88(%rsp), %rdi # 8-byte Folded Reload
+ adcq -80(%rsp), %rbx # 8-byte Folded Reload
+ adcq -56(%rsp), %r8 # 8-byte Folded Reload
+ adcq -48(%rsp), %r11 # 8-byte Folded Reload
+ adcq -40(%rsp), %rsi # 8-byte Folded Reload
+ addq %r9, %rbp
+ movq %rbp, 40(%r13)
+ adcq %r10, %rdi
+ adcq %rax, %rbx
+ adcq %r15, %r8
+ adcq %r14, %r11
+ adcq -72(%rsp), %rsi # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r12, %rdi
+ movq %rdi, 48(%r13)
+ adcq -32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 56(%r13)
+ adcq %rdx, %r8
+ movq %r8, 64(%r13)
+ adcq -64(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 72(%r13)
+ adcq -24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 80(%r13)
+ adcq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 88(%r13)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end81:
+ .size mcl_fpDbl_mulPre6L, .Lfunc_end81-mcl_fpDbl_mulPre6L
+
+ .globl mcl_fpDbl_sqrPre6L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre6L,@function
+mcl_fpDbl_sqrPre6L: # @mcl_fpDbl_sqrPre6L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r8
+ movq %r8, -56(%rsp) # 8-byte Spill
+ movq 24(%rsi), %r10
+ movq %r10, -40(%rsp) # 8-byte Spill
+ movq 32(%rsi), %r9
+ movq %r9, -32(%rsp) # 8-byte Spill
+ movq 40(%rsi), %r11
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rbx
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %rbp
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %rdi
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq %rcx
+ movq %rdx, %r14
+ movq %rax, %r9
+ movq %r10, %rax
+ mulq %rcx
+ movq %rdx, %r12
+ movq %rax, %r10
+ movq %r8, %rax
+ mulq %rcx
+ movq %rdx, %r13
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ addq %r8, %rbp
+ adcq %rdx, %r15
+ adcq %r10, %r13
+ adcq %r9, %r12
+ adcq -16(%rsp), %r14 # 8-byte Folded Reload
+ adcq $0, %rdi
+ movq %rdi, -48(%rsp) # 8-byte Spill
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, %rcx
+ movq -32(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r9
+ movq -40(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq -56(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, %rdi
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rax, %rbx
+ addq %r8, %rbp
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq %rbp, 8(%rax)
+ adcq %r15, %rbx
+ adcq %r13, %r11
+ adcq %r12, %r10
+ adcq %r14, %r9
+ movq %rcx, %rax
+ adcq -48(%rsp), %rax # 8-byte Folded Reload
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq -24(%rsp), %rbx # 8-byte Folded Reload
+ adcq %rdx, %r11
+ adcq %rdi, %r10
+ adcq -40(%rsp), %r9 # 8-byte Folded Reload
+ adcq -32(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -72(%rsp) # 8-byte Spill
+ adcq -16(%rsp), %rcx # 8-byte Folded Reload
+ movq 40(%rsi), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rdi
+ mulq %rdi
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rax, %r12
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rbp
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, %r14
+ movq %r14, -96(%rsp) # 8-byte Spill
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq (%rsi), %r15
+ movq %r15, -48(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq %r15, %rax
+ mulq %rdi
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq %rdi
+ movq %rax, %r13
+ addq %rbx, %r15
+ movq -8(%rsp), %rbx # 8-byte Reload
+ movq %r15, 16(%rbx)
+ adcq %r11, %r8
+ adcq %r10, %r13
+ adcq %r14, %r9
+ adcq -72(%rsp), %r12 # 8-byte Folded Reload
+ movq -80(%rsp), %r14 # 8-byte Reload
+ adcq %rcx, %r14
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq -104(%rsp), %r8 # 8-byte Folded Reload
+ adcq -88(%rsp), %r13 # 8-byte Folded Reload
+ adcq %rdx, %r9
+ adcq -24(%rsp), %r12 # 8-byte Folded Reload
+ adcq -56(%rsp), %r14 # 8-byte Folded Reload
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ movq -16(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq -32(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq -64(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, %r11
+ movq -48(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %rbp, %rax
+ mulq %rbp
+ movq %rax, %r15
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ addq %r8, %rdi
+ movq %rdi, 24(%rbx)
+ adcq %r13, %r11
+ adcq -96(%rsp), %r9 # 8-byte Folded Reload
+ adcq %r12, %r15
+ adcq %r14, %r10
+ movq -16(%rsp), %r12 # 8-byte Reload
+ adcq %rcx, %r12
+ sbbq %rcx, %rcx
+ movq (%rsi), %r8
+ andl $1, %ecx
+ movq 8(%rsi), %rbx
+ movq 40(%rsi), %rdi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %r8, %rax
+ mulq %rdi
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rbp
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %r8, %rax
+ mulq %rbp
+ movq %rax, %r14
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ addq -88(%rsp), %r11 # 8-byte Folded Reload
+ adcq -80(%rsp), %r9 # 8-byte Folded Reload
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ adcq -72(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, -16(%rsp) # 8-byte Spill
+ adcq -56(%rsp), %rcx # 8-byte Folded Reload
+ movq 24(%rsi), %rbx
+ movq 16(%rsi), %r8
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ movq %r8, %rax
+ mulq %rdi
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %r8, %rax
+ mulq %rbp
+ movq %rdx, %rbx
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq %rbp
+ movq %rdx, %r12
+ movq %rax, %r8
+ movq %rdi, %rax
+ mulq %rdi
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %rbp, %rax
+ mulq %rbp
+ addq %r14, %r11
+ movq -8(%rsp), %r14 # 8-byte Reload
+ movq %r11, 32(%r14)
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq %r15, %r13
+ adcq %r10, %rsi
+ adcq -16(%rsp), %rax # 8-byte Folded Reload
+ adcq %r8, %rcx
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq -112(%rsp), %r9 # 8-byte Folded Reload
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ adcq %rbx, %rsi
+ adcq -104(%rsp), %rax # 8-byte Folded Reload
+ adcq %rdx, %rcx
+ adcq %r12, %rbp
+ addq -64(%rsp), %r9 # 8-byte Folded Reload
+ movq %r14, %rbx
+ movq %r9, 40(%rbx)
+ adcq -48(%rsp), %r13 # 8-byte Folded Reload
+ adcq -88(%rsp), %rsi # 8-byte Folded Reload
+ adcq -80(%rsp), %rax # 8-byte Folded Reload
+ adcq %r8, %rcx
+ adcq %rdi, %rbp
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ addq -40(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, 48(%rbx)
+ adcq -32(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 56(%rbx)
+ adcq -72(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 64(%rbx)
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 72(%rbx)
+ adcq %r12, %rbp
+ movq %rbp, 80(%rbx)
+ adcq -56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 88(%rbx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end82:
+ .size mcl_fpDbl_sqrPre6L, .Lfunc_end82-mcl_fpDbl_sqrPre6L
+
+ .globl mcl_fp_mont6L
+ .align 16, 0x90
+ .type mcl_fp_mont6L,@function
+mcl_fp_mont6L: # @mcl_fp_mont6L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $56, %rsp
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rdi, -96(%rsp) # 8-byte Spill
+ movq 40(%rsi), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq (%rdx), %rbx
+ mulq %rbx
+ movq %rax, %r8
+ movq %rdx, %r14
+ movq 32(%rsi), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rax, %r9
+ movq %rdx, %r15
+ movq 24(%rsi), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rbp
+ movq %rbp, -48(%rsp) # 8-byte Spill
+ movq (%rsi), %r12
+ movq %r12, -32(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rsi
+ movq %rsi, -40(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rdx, %rdi
+ movq %rax, %r10
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %rbp
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, %rsi
+ movq %rax, %r13
+ movq %r12, %rax
+ mulq %rbx
+ movq %rax, -120(%rsp) # 8-byte Spill
+ addq %r13, %rdx
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ adcq %r11, %rsi
+ movq %rsi, -104(%rsp) # 8-byte Spill
+ adcq %r10, %rbp
+ movq %rbp, -88(%rsp) # 8-byte Spill
+ adcq %r9, %rdi
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ adcq %r8, %r15
+ movq %r15, -72(%rsp) # 8-byte Spill
+ adcq $0, %r14
+ movq %r14, -64(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ imulq %rdx, %rdi
+ movq (%rcx), %r9
+ movq %r9, 8(%rsp) # 8-byte Spill
+ movq 40(%rcx), %rdx
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rbp
+ movq %rbp, 32(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rsi
+ movq %rsi, 16(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, 24(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rdx, %r11
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq %rbp
+ movq %rdx, %r13
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq %rbx
+ movq %rdx, %rbp
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq %rsi
+ movq %rdx, %rbx
+ movq %rax, %r12
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rdx, %r8
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq %r9
+ movq %rdx, %r9
+ addq %r15, %r9
+ adcq %r12, %r8
+ adcq %r14, %rbx
+ adcq %r10, %rbp
+ adcq -128(%rsp), %r13 # 8-byte Folded Reload
+ adcq $0, %r11
+ addq -120(%rsp), %rax # 8-byte Folded Reload
+ adcq -112(%rsp), %r9 # 8-byte Folded Reload
+ adcq -104(%rsp), %r8 # 8-byte Folded Reload
+ adcq -88(%rsp), %rbx # 8-byte Folded Reload
+ adcq -80(%rsp), %rbp # 8-byte Folded Reload
+ adcq -72(%rsp), %r13 # 8-byte Folded Reload
+ adcq -64(%rsp), %r11 # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdi
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %rcx
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rax, %r12
+ movq %rdx, %rdi
+ addq %r10, %rdi
+ adcq %rcx, %rsi
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ adcq -104(%rsp), %r14 # 8-byte Folded Reload
+ movq -72(%rsp), %rcx # 8-byte Reload
+ adcq -88(%rsp), %rcx # 8-byte Folded Reload
+ movq -64(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %r9, %r12
+ adcq %r8, %rdi
+ adcq %rbx, %rsi
+ adcq %rbp, %r15
+ adcq %r13, %r14
+ adcq %r11, %rcx
+ movq %rcx, -72(%rsp) # 8-byte Spill
+ adcq -80(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -64(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %r12, %rbx
+ imulq (%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq 24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ addq %r11, %r9
+ adcq %r13, %rbp
+ adcq -120(%rsp), %rcx # 8-byte Folded Reload
+ adcq -112(%rsp), %r10 # 8-byte Folded Reload
+ adcq -104(%rsp), %r8 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r12, %rax
+ adcq %rdi, %r9
+ adcq %rsi, %rbp
+ adcq %r15, %rcx
+ adcq %r14, %r10
+ adcq -72(%rsp), %r8 # 8-byte Folded Reload
+ adcq -64(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ adcq $0, -88(%rsp) # 8-byte Folded Spill
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdi
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rax, %r13
+ movq %rdx, %rdi
+ addq %r15, %rdi
+ adcq %r11, %rsi
+ adcq %rbx, %r12
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ movq -72(%rsp), %rdx # 8-byte Reload
+ adcq -104(%rsp), %rdx # 8-byte Folded Reload
+ movq -64(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %r9, %r13
+ adcq %rbp, %rdi
+ adcq %rcx, %rsi
+ adcq %r10, %r12
+ adcq %r8, %r14
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ adcq -88(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -64(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %r13, %rbp
+ imulq (%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %rbp, %rax
+ mulq 24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ addq %r8, %r9
+ adcq %r10, %rcx
+ adcq -120(%rsp), %rbx # 8-byte Folded Reload
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ adcq -104(%rsp), %r11 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r13, %rax
+ adcq %rdi, %r9
+ adcq %rsi, %rcx
+ adcq %r12, %rbx
+ adcq %r14, %r15
+ adcq -72(%rsp), %r11 # 8-byte Folded Reload
+ adcq -64(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq -88(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdi
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r8
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rax, %r12
+ movq %rdx, %rdi
+ addq %r13, %rdi
+ adcq %r8, %rsi
+ adcq -112(%rsp), %r10 # 8-byte Folded Reload
+ adcq -104(%rsp), %r14 # 8-byte Folded Reload
+ movq -72(%rsp), %rdx # 8-byte Reload
+ adcq -88(%rsp), %rdx # 8-byte Folded Reload
+ movq -64(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %r9, %r12
+ adcq %rcx, %rdi
+ adcq %rbx, %rsi
+ adcq %r15, %r10
+ adcq %r11, %r14
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ adcq %rbp, %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %r12, %rbp
+ imulq (%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r13
+ movq %rbp, %rax
+ mulq 24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ addq %r9, %r8
+ adcq %r13, %rcx
+ adcq -120(%rsp), %rbx # 8-byte Folded Reload
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ adcq -104(%rsp), %r11 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r12, %rax
+ adcq %rdi, %r8
+ adcq %rsi, %rcx
+ adcq %r10, %rbx
+ adcq %r14, %r15
+ adcq -72(%rsp), %r11 # 8-byte Folded Reload
+ adcq -64(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ adcq $0, -88(%rsp) # 8-byte Folded Spill
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rsi
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %r13
+ addq %r10, %r13
+ adcq %r12, %r14
+ adcq -120(%rsp), %rdi # 8-byte Folded Reload
+ adcq -112(%rsp), %rbp # 8-byte Folded Reload
+ movq -72(%rsp), %rdx # 8-byte Reload
+ adcq -104(%rsp), %rdx # 8-byte Folded Reload
+ movq -64(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %r8, %r9
+ adcq %rcx, %r13
+ adcq %rbx, %r14
+ adcq %r15, %rdi
+ adcq %r11, %rbp
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ adcq -88(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -64(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %r9, %rsi
+ imulq (%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq 24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rcx
+ movq %rsi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ addq %rcx, %r8
+ adcq %rbx, %r12
+ adcq -120(%rsp), %r15 # 8-byte Folded Reload
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r9, %rax
+ adcq %r13, %r8
+ adcq %r14, %r12
+ adcq %rdi, %r15
+ adcq %rbp, %r11
+ adcq -72(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, -72(%rsp) # 8-byte Spill
+ adcq -64(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq -88(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rcx
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rax, %r13
+ movq %rdx, %rbp
+ addq %rdi, %rbp
+ adcq %rsi, %r14
+ adcq %r9, %r10
+ movq -8(%rsp), %rax # 8-byte Reload
+ adcq -24(%rsp), %rax # 8-byte Folded Reload
+ movq -16(%rsp), %rdx # 8-byte Reload
+ adcq -88(%rsp), %rdx # 8-byte Folded Reload
+ movq -64(%rsp), %rsi # 8-byte Reload
+ adcq $0, %rsi
+ addq %r8, %r13
+ movq %r13, -40(%rsp) # 8-byte Spill
+ adcq %r12, %rbp
+ adcq %r15, %r14
+ movq %r14, -24(%rsp) # 8-byte Spill
+ adcq %r11, %r10
+ movq %r10, -32(%rsp) # 8-byte Spill
+ adcq -72(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -8(%rsp) # 8-byte Spill
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ adcq %rbx, %rsi
+ movq %rsi, -64(%rsp) # 8-byte Spill
+ sbbq %rcx, %rcx
+ movq (%rsp), %r9 # 8-byte Reload
+ imulq %r13, %r9
+ andl $1, %ecx
+ movq %r9, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, (%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ movq 8(%rsp), %r13 # 8-byte Reload
+ mulq %r13
+ movq %rdx, %r15
+ movq %rax, %r12
+ movq %r9, %rax
+ movq 16(%rsp), %r14 # 8-byte Reload
+ mulq %r14
+ movq %rdx, %rsi
+ movq %rax, %r11
+ movq %r9, %rax
+ movq 24(%rsp), %r10 # 8-byte Reload
+ mulq %r10
+ addq %r15, %rax
+ adcq %r11, %rdx
+ adcq -56(%rsp), %rsi # 8-byte Folded Reload
+ adcq -48(%rsp), %rdi # 8-byte Folded Reload
+ adcq (%rsp), %rbx # 8-byte Folded Reload
+ adcq $0, %r8
+ addq -40(%rsp), %r12 # 8-byte Folded Reload
+ adcq %rbp, %rax
+ adcq -24(%rsp), %rdx # 8-byte Folded Reload
+ adcq -32(%rsp), %rsi # 8-byte Folded Reload
+ adcq -8(%rsp), %rdi # 8-byte Folded Reload
+ adcq -16(%rsp), %rbx # 8-byte Folded Reload
+ adcq -64(%rsp), %r8 # 8-byte Folded Reload
+ adcq $0, %rcx
+ movq %rax, %rbp
+ subq %r13, %rbp
+ movq %rdx, %r9
+ sbbq %r10, %r9
+ movq %rsi, %r10
+ sbbq %r14, %r10
+ movq %rdi, %r11
+ sbbq 40(%rsp), %r11 # 8-byte Folded Reload
+ movq %rbx, %r14
+ sbbq 32(%rsp), %r14 # 8-byte Folded Reload
+ movq %r8, %r15
+ sbbq 48(%rsp), %r15 # 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rdi, %r11
+ testb %cl, %cl
+ cmovneq %rax, %rbp
+ movq -96(%rsp), %rax # 8-byte Reload
+ movq %rbp, (%rax)
+ cmovneq %rdx, %r9
+ movq %r9, 8(%rax)
+ cmovneq %rsi, %r10
+ movq %r10, 16(%rax)
+ movq %r11, 24(%rax)
+ cmovneq %rbx, %r14
+ movq %r14, 32(%rax)
+ cmovneq %r8, %r15
+ movq %r15, 40(%rax)
+ addq $56, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end83:
+ .size mcl_fp_mont6L, .Lfunc_end83-mcl_fp_mont6L
+
+ .globl mcl_fp_montNF6L
+ .align 16, 0x90
+ .type mcl_fp_montNF6L,@function
+mcl_fp_montNF6L: # @mcl_fp_montNF6L
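+# Presumably the "NF" variant of Montgomery multiplication for 6 x 64-bit limbs; unlike mcl_fp_mont6L above, the epilogue selects between the raw and reduced result with a sign test (sarq $63 / cmovsq) rather than a carry mask.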
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $40, %rsp
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ movq 40(%rsi), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq (%rdx), %rbx
+ mulq %rbx
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq %rdx, %r13
+ movq 32(%rsi), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rax, %r10
+ movq %rdx, %r9
+ movq 24(%rsi), %rax
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rbp
+ movq %rbp, -64(%rsp) # 8-byte Spill
+ movq (%rsi), %rdi
+ movq %rdi, -48(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rsi
+ movq %rsi, -56(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rdx, %r11
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %r14
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq %rbx
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rdx, %rbx
+ addq %rbp, %rbx
+ adcq %r15, %r12
+ adcq %r8, %r14
+ adcq %r10, %r11
+ adcq 32(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, -96(%rsp) # 8-byte Spill
+ adcq $0, %r13
+ movq %r13, -80(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq %rax, %r9
+ imulq %rdx, %r9
+ movq (%rcx), %r8
+ movq %r8, 8(%rsp) # 8-byte Spill
+ movq 40(%rcx), %rdx
+ movq %rdx, 32(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rsi
+ movq %rsi, 24(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rbp
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rdi
+ movq %rdi, -40(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq %rdx
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq %r9, %rax
+ mulq %rsi
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %r9, %rax
+ mulq %rbp
+ movq %rdx, -128(%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ movq %r9, %rax
+ mulq %rdi
+ movq %rdx, %r13
+ movq %rax, %rdi
+ movq %r9, %rax
+ mulq %rcx
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ movq %r9, %rax
+ mulq %r8
+ addq -104(%rsp), %rax # 8-byte Folded Reload
+ adcq %rbx, %rbp
+ adcq %r12, %rdi
+ adcq %r14, %rsi
+ adcq %r11, %r15
+ adcq -96(%rsp), %r10 # 8-byte Folded Reload
+ movq -80(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rbp
+ adcq %rcx, %rdi
+ adcq %r13, %rsi
+ adcq -128(%rsp), %r15 # 8-byte Folded Reload
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ adcq -112(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rcx
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rax, %r13
+ movq %rdx, %rcx
+ addq %r8, %rcx
+ adcq -120(%rsp), %rbx # 8-byte Folded Reload
+ adcq -112(%rsp), %r12 # 8-byte Folded Reload
+ adcq -104(%rsp), %r11 # 8-byte Folded Reload
+ adcq -96(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r14
+ addq %rbp, %r13
+ adcq %rdi, %rcx
+ adcq %rsi, %rbx
+ adcq %r15, %r12
+ adcq %r10, %r11
+ adcq -80(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r14
+ movq %r13, %rsi
+ imulq (%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq 24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq %rsi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ addq %r13, %rax
+ adcq %rcx, %r15
+ adcq %rbx, %r10
+ adcq %r12, %r8
+ adcq %r11, %rbp
+ adcq %r9, %rdi
+ adcq $0, %r14
+ addq %rdx, %r15
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ adcq -112(%rsp), %r8 # 8-byte Folded Reload
+ adcq -104(%rsp), %rbp # 8-byte Folded Reload
+ adcq -80(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ adcq -96(%rsp), %r14 # 8-byte Folded Reload
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rsi
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rsi
+ addq %rdi, %rsi
+ adcq -120(%rsp), %rbx # 8-byte Folded Reload
+ adcq -112(%rsp), %r12 # 8-byte Folded Reload
+ adcq -104(%rsp), %rcx # 8-byte Folded Reload
+ adcq -96(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r13
+ addq %r15, %r9
+ adcq %r10, %rsi
+ adcq %r8, %rbx
+ adcq %rbp, %r12
+ adcq -80(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r14, %r11
+ adcq $0, %r13
+ movq %r9, %r8
+ imulq (%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %r8, %rax
+ mulq 24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %r8, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq %r8, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq %r8, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %rdi
+ movq %r8, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ addq %r9, %rax
+ adcq %rsi, %rdi
+ adcq %rbx, %r14
+ adcq %r12, %r10
+ adcq %rcx, %r15
+ movq -80(%rsp), %rax # 8-byte Reload
+ adcq %r11, %rax
+ adcq $0, %r13
+ addq %rdx, %rdi
+ adcq %rbp, %r14
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ adcq -96(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, -96(%rsp) # 8-byte Spill
+ adcq -112(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -80(%rsp) # 8-byte Spill
+ adcq -104(%rsp), %r13 # 8-byte Folded Reload
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rbp
+ movq %rbp, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %rbp
+ addq %r9, %rbp
+ adcq %r12, %rbx
+ adcq -120(%rsp), %rsi # 8-byte Folded Reload
+ adcq -112(%rsp), %rcx # 8-byte Folded Reload
+ adcq -104(%rsp), %r11 # 8-byte Folded Reload
+ adcq $0, %r15
+ addq %rdi, %r8
+ adcq %r14, %rbp
+ adcq %r10, %rbx
+ adcq -96(%rsp), %rsi # 8-byte Folded Reload
+ adcq -80(%rsp), %rcx # 8-byte Folded Reload
+ adcq %r13, %r11
+ adcq $0, %r15
+ movq %r8, %r14
+ imulq (%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, %r9
+ movq %r14, %rax
+ mulq 24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq %r14, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq %r14, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %r14, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %r14, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ addq %r8, %rax
+ adcq %rbp, %rdi
+ adcq %rbx, %r12
+ adcq %rsi, %r10
+ adcq %rcx, %r13
+ adcq %r11, %r9
+ adcq $0, %r15
+ addq %rdx, %rdi
+ adcq -120(%rsp), %r12 # 8-byte Folded Reload
+ adcq -112(%rsp), %r10 # 8-byte Folded Reload
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, -96(%rsp) # 8-byte Spill
+ adcq -80(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, -80(%rsp) # 8-byte Spill
+ adcq -104(%rsp), %r15 # 8-byte Folded Reload
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rcx
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r13
+ movq %rcx, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rax, %r11
+ movq %rdx, %rbp
+ addq %r13, %rbp
+ adcq -128(%rsp), %rbx # 8-byte Folded Reload
+ adcq -120(%rsp), %rsi # 8-byte Folded Reload
+ adcq -112(%rsp), %r8 # 8-byte Folded Reload
+ adcq -104(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r14
+ addq %rdi, %r11
+ adcq %r12, %rbp
+ adcq %r10, %rbx
+ adcq -96(%rsp), %rsi # 8-byte Folded Reload
+ adcq -80(%rsp), %r8 # 8-byte Folded Reload
+ adcq %r15, %r9
+ adcq $0, %r14
+ movq %r11, %rcx
+ imulq (%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ addq %r11, %rax
+ adcq %rbp, %rdi
+ adcq %rbx, %r15
+ adcq %rsi, %r10
+ adcq %r8, %r12
+ movq -80(%rsp), %rcx # 8-byte Reload
+ adcq %r9, %rcx
+ adcq $0, %r14
+ addq %rdx, %rdi
+ adcq %r13, %r15
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, -104(%rsp) # 8-byte Spill
+ adcq -96(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, -96(%rsp) # 8-byte Spill
+ adcq -120(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -80(%rsp) # 8-byte Spill
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ movq -8(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rcx
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq -56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rax, %r11
+ movq %rdx, %r8
+ addq %rsi, %r8
+ adcq %rbp, %r10
+ adcq -24(%rsp), %r13 # 8-byte Folded Reload
+ adcq -16(%rsp), %r12 # 8-byte Folded Reload
+ adcq -8(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %rbx
+ addq %rdi, %r11
+ adcq %r15, %r8
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ adcq -80(%rsp), %r12 # 8-byte Folded Reload
+ adcq %r14, %r9
+ movq %r9, -16(%rsp) # 8-byte Spill
+ adcq $0, %rbx
+ movq (%rsp), %r9 # 8-byte Reload
+ imulq %r11, %r9
+ movq %r9, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ movq %r9, %rax
+ mulq 24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %r9, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %r9, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq %r9, %rax
+ movq -40(%rsp), %r15 # 8-byte Reload
+ mulq %r15
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %rcx
+ movq %r9, %rax
+ movq -32(%rsp), %r9 # 8-byte Reload
+ mulq %r9
+ addq %r11, %r14
+ adcq %r8, %rax
+ adcq %r10, %rcx
+ adcq %r13, %rbp
+ adcq %r12, %rdi
+ adcq -16(%rsp), %rsi # 8-byte Folded Reload
+ adcq $0, %rbx
+ addq -48(%rsp), %rax # 8-byte Folded Reload
+ adcq %rdx, %rcx
+ adcq -56(%rsp), %rbp # 8-byte Folded Reload
+ adcq -24(%rsp), %rdi # 8-byte Folded Reload
+ adcq -8(%rsp), %rsi # 8-byte Folded Reload
+ adcq (%rsp), %rbx # 8-byte Folded Reload
+ movq %rax, %r14
+ subq 8(%rsp), %r14 # 8-byte Folded Reload
+ movq %rcx, %r8
+ sbbq %r9, %r8
+ movq %rbp, %r9
+ sbbq %r15, %r9
+ movq %rdi, %r10
+ sbbq 16(%rsp), %r10 # 8-byte Folded Reload
+ movq %rsi, %r11
+ sbbq 24(%rsp), %r11 # 8-byte Folded Reload
+ movq %rbx, %r15
+ sbbq 32(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, %rdx
+ sarq $63, %rdx
+ cmovsq %rax, %r14
+ movq -88(%rsp), %rax # 8-byte Reload
+ movq %r14, (%rax)
+ cmovsq %rcx, %r8
+ movq %r8, 8(%rax)
+ cmovsq %rbp, %r9
+ movq %r9, 16(%rax)
+ cmovsq %rdi, %r10
+ movq %r10, 24(%rax)
+ cmovsq %rsi, %r11
+ movq %r11, 32(%rax)
+ cmovsq %rbx, %r15
+ movq %r15, 40(%rax)
+ addq $40, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end84:
+ .size mcl_fp_montNF6L, .Lfunc_end84-mcl_fp_montNF6L
+
+ .globl mcl_fp_montRed6L
+ .align 16, 0x90
+ .type mcl_fp_montRed6L,@function
+mcl_fp_montRed6L: # @mcl_fp_montRed6L
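+# Presumably Montgomery reduction of a double-width (12-limb) input to 6 limbs; each round multiplies the running low limb by the constant at -8(%rcx) (presumably -p^-1 mod 2^64) and folds one word of the modulus.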
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $16, %rsp
+ movq %rdx, %rcx
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq (%rcx), %r11
+ movq %r11, -24(%rsp) # 8-byte Spill
+ movq (%rsi), %r9
+ movq %r9, %rbp
+ imulq %rax, %rbp
+ movq 40(%rcx), %rdx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r12
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rdx
+ movq %rdx, 8(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r15
+ movq %rdx, %r8
+ movq 24(%rcx), %rdx
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq 16(%rcx), %rdi
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rdx, %r10
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, %rcx
+ movq %rax, %rdi
+ movq %rbp, %rax
+ mulq %r11
+ movq %rdx, %rbp
+ addq %rdi, %rbp
+ adcq %rbx, %rcx
+ adcq %r14, %r13
+ adcq %r15, %r10
+ adcq %r12, %r8
+ movq -72(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r9, %rax
+ adcq 8(%rsi), %rbp
+ adcq 16(%rsi), %rcx
+ adcq 24(%rsi), %r13
+ adcq 32(%rsi), %r10
+ adcq 40(%rsi), %r8
+ movq %r8, -112(%rsp) # 8-byte Spill
+ adcq 48(%rsi), %rdx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq 88(%rsi), %rax
+ movq 80(%rsi), %rdx
+ movq 72(%rsi), %rdi
+ movq 64(%rsi), %rbx
+ movq 56(%rsi), %r15
+ adcq $0, %r15
+ adcq $0, %rbx
+ movq %rbx, -96(%rsp) # 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -64(%rsp) # 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ sbbq %r12, %r12
+ andl $1, %r12d
+ movq %rbp, %rdi
+ imulq -32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %r8
+ movq %rdi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ addq %r11, %rdi
+ adcq %r9, %rsi
+ adcq %r8, %rbx
+ adcq -128(%rsp), %r14 # 8-byte Folded Reload
+ movq -88(%rsp), %r8 # 8-byte Reload
+ adcq -120(%rsp), %r8 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %rbp, %rax
+ adcq %rcx, %rdi
+ adcq %r13, %rsi
+ adcq %r10, %rbx
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ adcq -72(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, -88(%rsp) # 8-byte Spill
+ adcq %r15, %rdx
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ adcq $0, -96(%rsp) # 8-byte Folded Spill
+ adcq $0, -64(%rsp) # 8-byte Folded Spill
+ adcq $0, -56(%rsp) # 8-byte Folded Spill
+ adcq $0, -48(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rdi, %rcx
+ imulq -32(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ addq %r10, %r9
+ adcq %r8, %rbp
+ adcq -128(%rsp), %r13 # 8-byte Folded Reload
+ adcq -120(%rsp), %r11 # 8-byte Folded Reload
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ movq -72(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq %rdi, %rax
+ adcq %rsi, %r9
+ adcq %rbx, %rbp
+ adcq %r14, %r13
+ adcq -88(%rsp), %r11 # 8-byte Folded Reload
+ adcq -80(%rsp), %r15 # 8-byte Folded Reload
+ adcq -96(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -72(%rsp) # 8-byte Spill
+ adcq $0, -64(%rsp) # 8-byte Folded Spill
+ adcq $0, -56(%rsp) # 8-byte Folded Spill
+ adcq $0, -48(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %r9, %rsi
+ imulq -32(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ addq %rbx, %rdi
+ adcq %r10, %rcx
+ adcq -120(%rsp), %r8 # 8-byte Folded Reload
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ movq -88(%rsp), %rsi # 8-byte Reload
+ adcq -96(%rsp), %rsi # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r9, %rax
+ adcq %rbp, %rdi
+ adcq %r13, %rcx
+ adcq %r11, %r8
+ adcq %r15, %r14
+ adcq -72(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -88(%rsp) # 8-byte Spill
+ adcq -64(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ adcq $0, -56(%rsp) # 8-byte Folded Spill
+ movq -48(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ adcq $0, %r12
+ movq %rdi, %rsi
+ imulq -32(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ movq -40(%rsp), %r11 # 8-byte Reload
+ mulq %r11
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ addq %rbx, %rsi
+ adcq %r15, %r10
+ adcq -112(%rsp), %r13 # 8-byte Folded Reload
+ adcq -96(%rsp), %r9 # 8-byte Folded Reload
+ movq -72(%rsp), %rbx # 8-byte Reload
+ adcq -48(%rsp), %rbx # 8-byte Folded Reload
+ movq -64(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %rdi, %rax
+ adcq %rcx, %rsi
+ adcq %r8, %r10
+ adcq %r14, %r13
+ adcq -88(%rsp), %r9 # 8-byte Folded Reload
+ adcq -80(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, -72(%rsp) # 8-byte Spill
+ adcq -56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ adcq $0, %rbp
+ movq %rbp, -48(%rsp) # 8-byte Spill
+ adcq $0, %r12
+ movq -32(%rsp), %r8 # 8-byte Reload
+ imulq %rsi, %r8
+ movq %r8, %rax
+ mulq %r11
+ movq %rdx, %rdi
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq %r8, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %r8, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %r8, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r11
+ movq %r8, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r14
+ movq %r8, %rax
+ movq -24(%rsp), %r8 # 8-byte Reload
+ mulq %r8
+ addq %r14, %rdx
+ adcq %r11, %rbp
+ adcq -80(%rsp), %rbx # 8-byte Folded Reload
+ adcq -56(%rsp), %rcx # 8-byte Folded Reload
+ adcq -32(%rsp), %r15 # 8-byte Folded Reload
+ adcq $0, %rdi
+ addq %rsi, %rax
+ adcq %r10, %rdx
+ adcq %r13, %rbp
+ adcq %r9, %rbx
+ adcq -72(%rsp), %rcx # 8-byte Folded Reload
+ adcq -64(%rsp), %r15 # 8-byte Folded Reload
+ adcq -48(%rsp), %rdi # 8-byte Folded Reload
+ adcq $0, %r12
+ movq %rdx, %rax
+ subq %r8, %rax
+ movq %rbp, %rsi
+ sbbq -16(%rsp), %rsi # 8-byte Folded Reload
+ movq %rbx, %r9
+ sbbq -8(%rsp), %r9 # 8-byte Folded Reload
+ movq %rcx, %r10
+ sbbq (%rsp), %r10 # 8-byte Folded Reload
+ movq %r15, %r11
+ sbbq 8(%rsp), %r11 # 8-byte Folded Reload
+ movq %rdi, %r14
+ sbbq -40(%rsp), %r14 # 8-byte Folded Reload
+ sbbq $0, %r12
+ andl $1, %r12d
+ cmovneq %rdi, %r14
+ testb %r12b, %r12b
+ cmovneq %rdx, %rax
+ movq -104(%rsp), %rdx # 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rbp, %rsi
+ movq %rsi, 8(%rdx)
+ cmovneq %rbx, %r9
+ movq %r9, 16(%rdx)
+ cmovneq %rcx, %r10
+ movq %r10, 24(%rdx)
+ cmovneq %r15, %r11
+ movq %r11, 32(%rdx)
+ movq %r14, 40(%rdx)
+ addq $16, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end85:
+ .size mcl_fp_montRed6L, .Lfunc_end85-mcl_fp_montRed6L
+
+ .globl mcl_fp_addPre6L
+ .align 16, 0x90
+ .type mcl_fp_addPre6L,@function
+mcl_fp_addPre6L: # @mcl_fp_addPre6L
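+# Plain 6-limb addition with no modular reduction; the final carry is returned in %rax.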
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 40(%rsi), %r11
+ movq 32(%rdx), %r9
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %r14
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r10, %rax
+ movq %rax, 24(%rdi)
+ adcq %r9, %r14
+ movq %r14, 32(%rdi)
+ adcq %r8, %r11
+ movq %r11, 40(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end86:
+ .size mcl_fp_addPre6L, .Lfunc_end86-mcl_fp_addPre6L
+
+ .globl mcl_fp_subPre6L
+ .align 16, 0x90
+ .type mcl_fp_subPre6L,@function
+mcl_fp_subPre6L: # @mcl_fp_subPre6L
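+# Plain 6-limb subtraction with no modular reduction; the final borrow is returned in %rax.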
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 40(%rsi), %r9
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rsi
+ movq 24(%rdx), %r14
+ movq 32(%rdx), %r15
+ sbbq 16(%rdx), %rcx
+ movq %rbx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r14, %r11
+ movq %r11, 24(%rdi)
+ sbbq %r15, %r10
+ movq %r10, 32(%rdi)
+ sbbq %r8, %r9
+ movq %r9, 40(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end87:
+ .size mcl_fp_subPre6L, .Lfunc_end87-mcl_fp_subPre6L
+
+ .globl mcl_fp_shr1_6L
+ .align 16, 0x90
+ .type mcl_fp_shr1_6L,@function
+mcl_fp_shr1_6L: # @mcl_fp_shr1_6L
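+# Right shift of a 6-limb value by one bit, chained through the limbs with shrdq.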
+# BB#0:
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %rdx
+ movq 16(%rsi), %rax
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rcx
+ movq %rcx, (%rdi)
+ shrdq $1, %rax, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rdx, %rax
+ movq %rax, 16(%rdi)
+ shrdq $1, %r9, %rdx
+ movq %rdx, 24(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 32(%rdi)
+ shrq %r8
+ movq %r8, 40(%rdi)
+ retq
+.Lfunc_end88:
+ .size mcl_fp_shr1_6L, .Lfunc_end88-mcl_fp_shr1_6L
+
+ .globl mcl_fp_add6L
+ .align 16, 0x90
+ .type mcl_fp_add6L,@function
+mcl_fp_add6L: # @mcl_fp_add6L
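+# Presumably modular addition over 6 limbs: the raw sum is stored, then the sum minus the modulus (pointed to by %rcx) is stored instead when no borrow occurs.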
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r14
+ movq 40(%rsi), %r8
+ movq 32(%rdx), %r15
+ movq 24(%rdx), %rbx
+ movq 24(%rsi), %r10
+ movq 32(%rsi), %r9
+ movq 16(%rdx), %r11
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r11
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r11, 16(%rdi)
+ adcq %rbx, %r10
+ movq %r10, 24(%rdi)
+ adcq %r15, %r9
+ movq %r9, 32(%rdi)
+ adcq %r14, %r8
+ movq %r8, 40(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r11
+ sbbq 24(%rcx), %r10
+ sbbq 32(%rcx), %r9
+ sbbq 40(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB89_2
+# BB#1: # %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r11, 16(%rdi)
+ movq %r10, 24(%rdi)
+ movq %r9, 32(%rdi)
+ movq %r8, 40(%rdi)
+.LBB89_2: # %carry
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end89:
+ .size mcl_fp_add6L, .Lfunc_end89-mcl_fp_add6L
+
+ .globl mcl_fp_addNF6L
+ .align 16, 0x90
+ .type mcl_fp_addNF6L,@function
+mcl_fp_addNF6L: # @mcl_fp_addNF6L
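+# Presumably the "NF" modular addition variant: add, subtract the modulus, then pick the correct result branch-free via the sign of the difference (sarq $63 / cmovsq).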
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 32(%rdx), %r9
+ movq 24(%rdx), %r10
+ movq 16(%rdx), %r11
+ movq (%rdx), %r15
+ movq 8(%rdx), %r14
+ addq (%rsi), %r15
+ adcq 8(%rsi), %r14
+ adcq 16(%rsi), %r11
+ adcq 24(%rsi), %r10
+ adcq 32(%rsi), %r9
+ adcq 40(%rsi), %r8
+ movq %r15, %rsi
+ subq (%rcx), %rsi
+ movq %r14, %rbx
+ sbbq 8(%rcx), %rbx
+ movq %r11, %rdx
+ sbbq 16(%rcx), %rdx
+ movq %r10, %r13
+ sbbq 24(%rcx), %r13
+ movq %r9, %r12
+ sbbq 32(%rcx), %r12
+ movq %r8, %rax
+ sbbq 40(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r15, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r14, %rbx
+ movq %rbx, 8(%rdi)
+ cmovsq %r11, %rdx
+ movq %rdx, 16(%rdi)
+ cmovsq %r10, %r13
+ movq %r13, 24(%rdi)
+ cmovsq %r9, %r12
+ movq %r12, 32(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end90:
+ .size mcl_fp_addNF6L, .Lfunc_end90-mcl_fp_addNF6L
+
+ .globl mcl_fp_sub6L
+ .align 16, 0x90
+ .type mcl_fp_sub6L,@function
+mcl_fp_sub6L: # @mcl_fp_sub6L
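+# Presumably modular subtraction over 6 limbs: subtract, then add the modulus back only when the subtraction borrowed.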
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r14
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r10
+ movq 16(%rsi), %r11
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %rsi
+ movq 24(%rdx), %r15
+ movq 32(%rdx), %r12
+ sbbq 16(%rdx), %r11
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r11, 16(%rdi)
+ sbbq %r15, %r10
+ movq %r10, 24(%rdi)
+ sbbq %r12, %r9
+ movq %r9, 32(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 40(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB91_2
+# BB#1: # %carry
+ movq 40(%rcx), %r14
+ movq 32(%rcx), %r15
+ movq 24(%rcx), %r12
+ movq 8(%rcx), %rbx
+ movq 16(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %rsi, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r11, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r10, %r12
+ movq %r12, 24(%rdi)
+ adcq %r9, %r15
+ movq %r15, 32(%rdi)
+ adcq %r8, %r14
+ movq %r14, 40(%rdi)
+.LBB91_2: # %nocarry
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end91:
+ .size mcl_fp_sub6L, .Lfunc_end91-mcl_fp_sub6L
+
+ .globl mcl_fp_subNF6L
+ .align 16, 0x90
+ .type mcl_fp_subNF6L,@function
+mcl_fp_subNF6L: # @mcl_fp_subNF6L
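+# Presumably the "NF" modular subtraction variant: the sign of the raw difference is broadcast into a mask (sarq $63) that decides, branch-free, whether the modulus is added back.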
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 40(%rsi), %r15
+ movq 32(%rsi), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %r11
+ movq 8(%rsi), %r14
+ subq (%rdx), %r11
+ sbbq 8(%rdx), %r14
+ sbbq 16(%rdx), %r10
+ sbbq 24(%rdx), %r9
+ sbbq 32(%rdx), %r8
+ sbbq 40(%rdx), %r15
+ movq %r15, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rbx
+ addq %rbx, %rbx
+ movq %rdx, %rsi
+ adcq %rsi, %rsi
+ andq 8(%rcx), %rsi
+ movq %r15, %rax
+ shrq $63, %rax
+ orq %rbx, %rax
+ andq (%rcx), %rax
+ movq 40(%rcx), %r12
+ andq %rdx, %r12
+ movq 32(%rcx), %r13
+ andq %rdx, %r13
+ movq 24(%rcx), %rbx
+ andq %rdx, %rbx
+ andq 16(%rcx), %rdx
+ addq %r11, %rax
+ movq %rax, (%rdi)
+ adcq %r14, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r10, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r9, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r8, %r13
+ movq %r13, 32(%rdi)
+ adcq %r15, %r12
+ movq %r12, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end92:
+ .size mcl_fp_subNF6L, .Lfunc_end92-mcl_fp_subNF6L
+
+ .globl mcl_fpDbl_add6L
+ .align 16, 0x90
+ .type mcl_fpDbl_add6L,@function
+mcl_fpDbl_add6L: # @mcl_fpDbl_add6L
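+# Presumably addition of two 12-limb (double-width) values; the low 6 limbs are stored as-is and the high half is conditionally reduced by the modulus in %rcx.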
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 88(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 80(%rdx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq 72(%rdx), %r14
+ movq 64(%rdx), %r15
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %r13
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %rbp
+ adcq 32(%rdx), %r13
+ movq 56(%rdx), %r11
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rdx
+ movq %rbx, (%rdi)
+ movq 88(%rsi), %r8
+ movq %rax, 8(%rdi)
+ movq 80(%rsi), %r10
+ movq %r12, 16(%rdi)
+ movq 72(%rsi), %r12
+ movq %rbp, 24(%rdi)
+ movq 40(%rsi), %rax
+ adcq %rdx, %rax
+ movq 64(%rsi), %rdx
+ movq %r13, 32(%rdi)
+ movq 56(%rsi), %r13
+ movq 48(%rsi), %rbp
+ adcq %r9, %rbp
+ movq %rax, 40(%rdi)
+ adcq %r11, %r13
+ adcq %r15, %rdx
+ adcq %r14, %r12
+ adcq -16(%rsp), %r10 # 8-byte Folded Reload
+ adcq -8(%rsp), %r8 # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rbp, %rsi
+ subq (%rcx), %rsi
+ movq %r13, %rbx
+ sbbq 8(%rcx), %rbx
+ movq %rdx, %r9
+ sbbq 16(%rcx), %r9
+ movq %r12, %r11
+ sbbq 24(%rcx), %r11
+ movq %r10, %r14
+ sbbq 32(%rcx), %r14
+ movq %r8, %r15
+ sbbq 40(%rcx), %r15
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rbp, %rsi
+ movq %rsi, 48(%rdi)
+ testb %al, %al
+ cmovneq %r13, %rbx
+ movq %rbx, 56(%rdi)
+ cmovneq %rdx, %r9
+ movq %r9, 64(%rdi)
+ cmovneq %r12, %r11
+ movq %r11, 72(%rdi)
+ cmovneq %r10, %r14
+ movq %r14, 80(%rdi)
+ cmovneq %r8, %r15
+ movq %r15, 88(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end93:
+ .size mcl_fpDbl_add6L, .Lfunc_end93-mcl_fpDbl_add6L
+
+ .globl mcl_fpDbl_sub6L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub6L,@function
+mcl_fpDbl_sub6L: # @mcl_fpDbl_sub6L
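+# Presumably subtraction of two 12-limb values; the modulus is conditionally added back to the high half depending on the final borrow.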
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 88(%rdx), %r9
+ movq 80(%rdx), %r10
+ movq 72(%rdx), %r14
+ movq 16(%rsi), %r8
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %eax, %eax
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r8
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 64(%rdx), %r13
+ movq %r15, (%rdi)
+ movq 56(%rdx), %rbp
+ movq %r11, 8(%rdi)
+ movq 48(%rdx), %r15
+ movq 40(%rdx), %rdx
+ movq %r8, 16(%rdi)
+ movq 88(%rsi), %r8
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %rdx, %rbx
+ movq 80(%rsi), %r11
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %rdx
+ sbbq %r15, %rdx
+ movq 72(%rsi), %r15
+ movq %rbx, 40(%rdi)
+ movq 64(%rsi), %r12
+ movq 56(%rsi), %rsi
+ sbbq %rbp, %rsi
+ sbbq %r13, %r12
+ sbbq %r14, %r15
+ sbbq %r10, %r11
+ sbbq %r9, %r8
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%rcx), %r14
+ cmoveq %rax, %r14
+ testb %bpl, %bpl
+ movq 16(%rcx), %r9
+ cmoveq %rax, %r9
+ movq 8(%rcx), %rbp
+ cmoveq %rax, %rbp
+ movq 40(%rcx), %r10
+ cmoveq %rax, %r10
+ movq 32(%rcx), %rbx
+ cmoveq %rax, %rbx
+ cmovneq 24(%rcx), %rax
+ addq %rdx, %r14
+ movq %r14, 48(%rdi)
+ adcq %rsi, %rbp
+ movq %rbp, 56(%rdi)
+ adcq %r12, %r9
+ movq %r9, 64(%rdi)
+ adcq %r15, %rax
+ movq %rax, 72(%rdi)
+ adcq %r11, %rbx
+ movq %rbx, 80(%rdi)
+ adcq %r8, %r10
+ movq %r10, 88(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end94:
+ .size mcl_fpDbl_sub6L, .Lfunc_end94-mcl_fpDbl_sub6L
+
+ .globl mcl_fp_mulUnitPre7L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre7L,@function
+mcl_fp_mulUnitPre7L: # @mcl_fp_mulUnitPre7L
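+# Multiplies a 7-limb operand by a single 64-bit word, producing an 8-limb result.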
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 48(%rsi)
+ movq %rdx, %r10
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 40(%rsi)
+ movq %rdx, %r11
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %r15
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r13
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %rbp, %r8
+ movq %r8, 16(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r14, %r13
+ movq %r13, 32(%rdi)
+ adcq -16(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, 40(%rdi)
+ adcq -8(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 48(%rdi)
+ adcq $0, %r10
+ movq %r10, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end95:
+ .size mcl_fp_mulUnitPre7L, .Lfunc_end95-mcl_fp_mulUnitPre7L
+
+ .globl mcl_fpDbl_mulPre7L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre7L,@function
+mcl_fpDbl_mulPre7L: # @mcl_fpDbl_mulPre7L
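+# Presumably full (non-reduced) 7x7-limb schoolbook multiplication producing a 14-limb product.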
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $24, %rsp
+ movq %rdx, 8(%rsp) # 8-byte Spill
+ movq %rsi, %r9
+ movq %rdi, 16(%rsp) # 8-byte Spill
+ movq (%r9), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 8(%r9), %r10
+ movq %r10, -64(%rsp) # 8-byte Spill
+ movq (%rdx), %rsi
+ mulq %rsi
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 16(%r9), %r11
+ movq %r11, -72(%rsp) # 8-byte Spill
+ movq 24(%r9), %rbx
+ movq %rbx, -56(%rsp) # 8-byte Spill
+ movq 32(%r9), %rbp
+ movq %rbp, -24(%rsp) # 8-byte Spill
+ movq 40(%r9), %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ movq 48(%r9), %r14
+ movq %rax, (%rdi)
+ movq %r14, %rax
+ mulq %rsi
+ movq %rdx, %rdi
+ movq %rax, (%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq %rsi
+ movq %rdx, %rcx
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rsi
+ movq %rdx, %rbp
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq %rsi
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %r11, %rax
+ mulq %rsi
+ movq %rdx, %r12
+ movq %rax, %r13
+ movq %r10, %rax
+ mulq %rsi
+ movq %rdx, %rsi
+ movq %rax, %r10
+ addq -32(%rsp), %r10 # 8-byte Folded Reload
+ adcq %r13, %rsi
+ adcq %r8, %r12
+ adcq %r15, %rbx
+ adcq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, -48(%rsp) # 8-byte Spill
+ adcq (%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -32(%rsp) # 8-byte Spill
+ movq 8(%rsp), %r11 # 8-byte Reload
+ movq 8(%r11), %rcx
+ movq %r14, %rax
+ mulq %rcx
+ movq %rdx, %r14
+ movq %rax, (%rsp) # 8-byte Spill
+ movq -16(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq -24(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq -56(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq -72(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq -64(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq -8(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ addq %r10, %rax
+ movq 16(%rsp), %r10 # 8-byte Reload
+ movq %rax, 8(%r10)
+ adcq %rsi, %rdi
+ adcq %r12, %rbp
+ adcq %rbx, %r15
+ adcq -48(%rsp), %r13 # 8-byte Folded Reload
+ movq %r8, %rcx
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ movq (%rsp), %rax # 8-byte Reload
+ adcq -32(%rsp), %rax # 8-byte Folded Reload
+ sbbq %r8, %r8
+ andl $1, %r8d
+ addq %rdx, %rdi
+ adcq -64(%rsp), %rbp # 8-byte Folded Reload
+ adcq -72(%rsp), %r15 # 8-byte Folded Reload
+ adcq -56(%rsp), %r13 # 8-byte Folded Reload
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -88(%rsp) # 8-byte Spill
+ adcq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, (%rsp) # 8-byte Spill
+ adcq %r14, %r8
+ movq 48(%r9), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 16(%r11), %rcx
+ mulq %rcx
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq 40(%r9), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq 32(%r9), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, %r12
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq 24(%r9), %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, %r14
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq 16(%r9), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, %rbx
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq (%r9), %rsi
+ movq 8(%r9), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq %rcx
+ addq %rdi, %rax
+ movq %rax, 16(%r10)
+ adcq %rbp, %r11
+ adcq %r15, %rbx
+ adcq %r13, %r14
+ adcq -88(%rsp), %r12 # 8-byte Folded Reload
+ movq -16(%rsp), %rdi # 8-byte Reload
+ adcq (%rsp), %rdi # 8-byte Folded Reload
+ movq -96(%rsp), %rax # 8-byte Reload
+ adcq %r8, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %rdx, %r11
+ adcq -120(%rsp), %rbx # 8-byte Folded Reload
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ adcq -104(%rsp), %r12 # 8-byte Folded Reload
+ adcq -80(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -16(%rsp) # 8-byte Spill
+ adcq -72(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, %rdi
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ movq 8(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rbp
+ movq -8(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, (%rsp) # 8-byte Spill
+ movq -24(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq -32(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq -48(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq -56(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq -64(%rsp), %rax # 8-byte Reload
+ mulq %rbp
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq %rbp
+ addq %r11, %rax
+ movq 16(%rsp), %rsi # 8-byte Reload
+ movq %rax, 24(%rsi)
+ adcq %rbx, %r10
+ adcq %r14, %r8
+ adcq %r12, %r15
+ adcq -16(%rsp), %r13 # 8-byte Folded Reload
+ movq -8(%rsp), %rsi # 8-byte Reload
+ adcq %rdi, %rsi
+ movq (%rsp), %rax # 8-byte Reload
+ adcq %rcx, %rax
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ addq %rdx, %r10
+ adcq -64(%rsp), %r8 # 8-byte Folded Reload
+ adcq -56(%rsp), %r15 # 8-byte Folded Reload
+ adcq -48(%rsp), %r13 # 8-byte Folded Reload
+ adcq -32(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -8(%rsp) # 8-byte Spill
+ adcq -24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, (%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rdi # 8-byte Folded Reload
+ movq 48(%r9), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 8(%rsp), %rbx # 8-byte Reload
+ movq 32(%rbx), %rcx
+ mulq %rcx
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq 40(%r9), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ movq 32(%r9), %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, %r12
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq 24(%r9), %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, %rbp
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq 16(%r9), %rax
+ movq %rax, -72(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rax, %r14
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq (%r9), %rsi
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ movq 8(%r9), %rax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ mulq %rcx
+ movq %rdx, -128(%rsp) # 8-byte Spill
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq %rcx
+ addq %r10, %rax
+ movq 16(%rsp), %rcx # 8-byte Reload
+ movq %rax, 32(%rcx)
+ adcq %r8, %r11
+ adcq %r15, %r14
+ adcq %r13, %rbp
+ adcq -8(%rsp), %r12 # 8-byte Folded Reload
+ movq -24(%rsp), %rcx # 8-byte Reload
+ adcq (%rsp), %rcx # 8-byte Folded Reload
+ movq -16(%rsp), %rax # 8-byte Reload
+ adcq %rdi, %rax
+ sbbq %r13, %r13
+ andl $1, %r13d
+ addq %rdx, %r11
+ adcq -128(%rsp), %r14 # 8-byte Folded Reload
+ adcq -120(%rsp), %rbp # 8-byte Folded Reload
+ adcq -112(%rsp), %r12 # 8-byte Folded Reload
+ adcq -104(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ adcq -96(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -16(%rsp) # 8-byte Spill
+ adcq -56(%rsp), %r13 # 8-byte Folded Reload
+ movq 40(%rbx), %rcx
+ movq -32(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq -40(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq -48(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq -64(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %rbx
+ movq -72(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ movq -88(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq -80(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ addq %r11, %rax
+ movq 16(%rsp), %rcx # 8-byte Reload
+ movq %rax, 40(%rcx)
+ adcq %r14, %r8
+ adcq %rbp, %rsi
+ adcq %r12, %rbx
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ adcq -16(%rsp), %r10 # 8-byte Folded Reload
+ adcq %r13, %rdi
+ movq 8(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %r11
+ sbbq %rcx, %rcx
+ movq %r11, %rax
+ mulq 48(%r9)
+ movq %rdx, 8(%rsp) # 8-byte Spill
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %r11, %rax
+ mulq 40(%r9)
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %r11, %rax
+ mulq 32(%r9)
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq %r11, %rax
+ mulq 24(%r9)
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %r11, %rax
+ mulq 16(%r9)
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq %r11, %rax
+ mulq 8(%r9)
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %r11, %rax
+ mulq (%r9)
+ andl $1, %ecx
+ addq -96(%rsp), %r8 # 8-byte Folded Reload
+ adcq -72(%rsp), %rsi # 8-byte Folded Reload
+ adcq -48(%rsp), %rbx # 8-byte Folded Reload
+ adcq -40(%rsp), %r15 # 8-byte Folded Reload
+ adcq -32(%rsp), %r10 # 8-byte Folded Reload
+ adcq -8(%rsp), %rdi # 8-byte Folded Reload
+ adcq (%rsp), %rcx # 8-byte Folded Reload
+ addq %rax, %r8
+ movq 16(%rsp), %r9 # 8-byte Reload
+ movq %r8, 48(%r9)
+ adcq %r12, %rsi
+ adcq %r14, %rbx
+ adcq %rbp, %r15
+ adcq %r13, %r10
+ adcq -88(%rsp), %rdi # 8-byte Folded Reload
+ adcq -64(%rsp), %rcx # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rsi
+ adcq -104(%rsp), %rbx # 8-byte Folded Reload
+ movq %r9, %rdx
+ movq %rsi, 56(%rdx)
+ movq %rbx, 64(%rdx)
+ adcq -80(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, 72(%rdx)
+ adcq -56(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 80(%rdx)
+ adcq -24(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 88(%rdx)
+ adcq -16(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 96(%rdx)
+ adcq 8(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 104(%rdx)
+ addq $24, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end96:
+ .size mcl_fpDbl_mulPre7L, .Lfunc_end96-mcl_fpDbl_mulPre7L
+
+ .globl mcl_fpDbl_sqrPre7L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre7L,@function
+mcl_fpDbl_sqrPre7L: # @mcl_fpDbl_sqrPre7L
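+# Presumably full (non-reduced) squaring of a 7-limb operand into a 14-limb result.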
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $16, %rsp
+ movq %rdi, 8(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r11
+ movq %r11, -64(%rsp) # 8-byte Spill
+ movq 24(%rsi), %r14
+ movq %r14, -48(%rsp) # 8-byte Spill
+ movq 32(%rsi), %r9
+ movq %r9, -24(%rsp) # 8-byte Spill
+ movq 40(%rsi), %r10
+ movq %r10, -16(%rsp) # 8-byte Spill
+ movq 48(%rsi), %r8
+ movq (%rsi), %rbp
+ movq 8(%rsi), %rbx
+ movq %rbp, %rax
+ mulq %rbp
+ movq %rdx, %rcx
+ movq %rax, (%rdi)
+ movq %r8, %rax
+ mulq %rbp
+ movq %rdx, %r15
+ movq %rax, (%rsp) # 8-byte Spill
+ movq %r10, %rax
+ mulq %rbp
+ movq %rdx, %rdi
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq %rbp
+ movq %rdx, %r9
+ movq %rax, %r10
+ movq %r14, %rax
+ mulq %rbp
+ movq %rdx, %r13
+ movq %rax, %r14
+ movq %r11, %rax
+ mulq %rbp
+ movq %rdx, %r12
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rax, -56(%rsp) # 8-byte Spill
+ addq %rax, %rcx
+ adcq %rdx, %r11
+ adcq %r14, %r12
+ adcq %r10, %r13
+ adcq -32(%rsp), %r9 # 8-byte Folded Reload
+ adcq (%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -40(%rsp) # 8-byte Spill
+ adcq $0, %r15
+ movq %r15, -32(%rsp) # 8-byte Spill
+ movq %r8, %rax
+ mulq %rbx
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq -16(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq -24(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq -48(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq -64(%rsp), %rax # 8-byte Reload
+ mulq %rbx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rax, %rbx
+ addq -56(%rsp), %rcx # 8-byte Folded Reload
+ movq 8(%rsp), %rax # 8-byte Reload
+ movq %rcx, 8(%rax)
+ adcq %r11, %rbx
+ adcq %r12, %rbp
+ adcq %r13, %r14
+ adcq %r9, %r10
+ adcq -40(%rsp), %r15 # 8-byte Folded Reload
+ adcq -32(%rsp), %rdi # 8-byte Folded Reload
+ sbbq %r8, %r8
+ andl $1, %r8d
+ addq -8(%rsp), %rbx # 8-byte Folded Reload
+ adcq %rdx, %rbp
+ adcq -64(%rsp), %r14 # 8-byte Folded Reload
+ adcq -48(%rsp), %r10 # 8-byte Folded Reload
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ adcq -16(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -72(%rsp) # 8-byte Spill
+ adcq (%rsp), %r8 # 8-byte Folded Reload
+ movq 48(%rsi), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rdi
+ mulq %rdi
+ movq %rax, (%rsp) # 8-byte Spill
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq 40(%rsi), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rax, %r13
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rcx
+ movq %rcx, %rax
+ mulq %rdi
+ movq %rax, %r9
+ movq %r9, -104(%rsp) # 8-byte Spill
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq (%rsi), %r12
+ movq %r12, -48(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ mulq %rdi
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ movq %rax, %r11
+ movq %r12, %rax
+ mulq %rdi
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %rdi, %rax
+ mulq %rdi
+ movq %rax, %rdi
+ addq %rbx, %r12
+ movq 8(%rsp), %rax # 8-byte Reload
+ movq %r12, 16(%rax)
+ adcq %rbp, %r11
+ adcq %r14, %rdi
+ adcq %r9, %r10
+ adcq %r15, %r13
+ movq -88(%rsp), %r14 # 8-byte Reload
+ adcq -72(%rsp), %r14 # 8-byte Folded Reload
+ movq (%rsp), %rax # 8-byte Reload
+ adcq %r8, %rax
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq -112(%rsp), %r11 # 8-byte Folded Reload
+ adcq -96(%rsp), %rdi # 8-byte Folded Reload
+ adcq %rdx, %r10
+ adcq -16(%rsp), %r13 # 8-byte Folded Reload
+ adcq -64(%rsp), %r14 # 8-byte Folded Reload
+ adcq -56(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, (%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rbx # 8-byte Folded Reload
+ movq -8(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq -24(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq -32(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, %r9
+ movq -80(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq -48(%rsp), %rax # 8-byte Reload
+ mulq %rcx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rax, %r12
+ movq %rdx, -96(%rsp) # 8-byte Spill
+ addq %r11, %rbp
+ movq 8(%rsp), %rax # 8-byte Reload
+ movq %rbp, 24(%rax)
+ adcq %rdi, %r15
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ adcq %r13, %r12
+ movq %r9, %rcx
+ adcq %r14, %rcx
+ movq -8(%rsp), %rdi # 8-byte Reload
+ adcq (%rsp), %rdi # 8-byte Folded Reload
+ adcq %rbx, %r8
+ sbbq %r14, %r14
+ andl $1, %r14d
+ movq (%rsi), %r9
+ movq 8(%rsi), %rbp
+ movq 40(%rsi), %r11
+ movq %rbp, %rax
+ mulq %r11
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq %r11
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq 32(%rsi), %rbx
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq %rbx
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ addq -88(%rsp), %r15 # 8-byte Folded Reload
+ adcq -80(%rsp), %r10 # 8-byte Folded Reload
+ adcq -16(%rsp), %r12 # 8-byte Folded Reload
+ adcq -96(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -96(%rsp) # 8-byte Spill
+ adcq -72(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -8(%rsp) # 8-byte Spill
+ adcq -64(%rsp), %r8 # 8-byte Folded Reload
+ adcq -56(%rsp), %r14 # 8-byte Folded Reload
+ movq 48(%rsi), %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %rcx
+ movq %r11, %rax
+ mulq %rbx
+ movq %rax, %rbp
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq 24(%rsi), %rax
+ movq %rax, -72(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rax, %rdi
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rdx, -128(%rsp) # 8-byte Spill
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rax, %r13
+ addq -120(%rsp), %r15 # 8-byte Folded Reload
+ movq 8(%rsp), %rax # 8-byte Reload
+ movq %r15, 32(%rax)
+ adcq -112(%rsp), %r10 # 8-byte Folded Reload
+ adcq %r12, %r9
+ adcq -96(%rsp), %rdi # 8-byte Folded Reload
+ adcq -8(%rsp), %r13 # 8-byte Folded Reload
+ adcq %rbp, %r8
+ adcq %r14, %rcx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq -104(%rsp), %r10 # 8-byte Folded Reload
+ adcq (%rsp), %r9 # 8-byte Folded Reload
+ adcq -128(%rsp), %rdi # 8-byte Folded Reload
+ adcq -88(%rsp), %r13 # 8-byte Folded Reload
+ adcq %rdx, %r8
+ adcq -16(%rsp), %rcx # 8-byte Folded Reload
+ adcq -64(%rsp), %rbx # 8-byte Folded Reload
+ movq -56(%rsp), %rax # 8-byte Reload
+ mulq %r11
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rax, (%rsp) # 8-byte Spill
+ movq -72(%rsp), %rax # 8-byte Reload
+ mulq %r11
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq -80(%rsp), %rax # 8-byte Reload
+ mulq %r11
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq %r11, %rax
+ mulq %r11
+ movq %rax, %r12
+ addq -48(%rsp), %r10 # 8-byte Folded Reload
+ movq 8(%rsp), %rax # 8-byte Reload
+ movq %r10, 40(%rax)
+ adcq -40(%rsp), %r9 # 8-byte Folded Reload
+ adcq %rdi, %r14
+ adcq %r13, %r15
+ adcq %rbp, %r8
+ adcq %rcx, %r12
+ movq (%rsp), %rax # 8-byte Reload
+ adcq %rbx, %rax
+ sbbq %r11, %r11
+ andl $1, %r11d
+ addq -32(%rsp), %r9 # 8-byte Folded Reload
+ adcq -24(%rsp), %r14 # 8-byte Folded Reload
+ adcq -64(%rsp), %r15 # 8-byte Folded Reload
+ adcq -56(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, -32(%rsp) # 8-byte Spill
+ adcq -16(%rsp), %r12 # 8-byte Folded Reload
+ adcq %rdx, %rax
+ movq %rax, (%rsp) # 8-byte Spill
+ adcq -8(%rsp), %r11 # 8-byte Folded Reload
+ movq 48(%rsi), %rcx
+ movq %rcx, %rax
+ mulq 40(%rsi)
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, %rbx
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq %rcx
+ addq %r9, %rsi
+ movq 8(%rsp), %r9 # 8-byte Reload
+ movq %rsi, 48(%r9)
+ adcq %r14, %rdi
+ adcq %r15, %r10
+ adcq -32(%rsp), %rbp # 8-byte Folded Reload
+ adcq %r12, %rbx
+ adcq (%rsp), %r8 # 8-byte Folded Reload
+ adcq %r11, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r13, %rdi
+ adcq -48(%rsp), %r10 # 8-byte Folded Reload
+ movq %r9, %rsi
+ movq %rdi, 56(%rsi)
+ movq %r10, 64(%rsi)
+ adcq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 72(%rsi)
+ adcq -24(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 80(%rsi)
+ adcq -16(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, 88(%rsi)
+ adcq -8(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 96(%rsi)
+ adcq %rdx, %rcx
+ movq %rcx, 104(%rsi)
+ addq $16, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end97:
+ .size mcl_fpDbl_sqrPre7L, .Lfunc_end97-mcl_fpDbl_sqrPre7L
+
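+# mcl_fp_mont7L(z, x, y, p): 7-limb (448-bit) Montgomery multiplication,
+# z = x*y*R^{-1} mod p with R = 2^448. The word loaded from -8(%rcx), i.e.
+# p[-1], appears to hold the precomputed constant -p^{-1} mod 2^64 that is
+# imul'd with the running low word to derive each reduction multiplier.
+# One word of y is folded in per pass (word-serial, CIOS-style), and the
+# final borrow-propagated subtraction of p selects the fully reduced result.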
+ .globl mcl_fp_mont7L
+ .align 16, 0x90
+ .type mcl_fp_mont7L,@function
+mcl_fp_mont7L: # @mcl_fp_mont7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $96, %rsp
+ movq %rdx, 24(%rsp) # 8-byte Spill
+ movq %rdi, -96(%rsp) # 8-byte Spill
+ movq 48(%rsi), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ movq (%rdx), %rbx
+ mulq %rbx
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq %rdx, %r15
+ movq 40(%rsi), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq %rdx, %r12
+ movq 32(%rsi), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 24(%rsi), %r9
+ movq %r9, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r10
+ movq %r10, -16(%rsp) # 8-byte Spill
+ movq (%rsi), %r13
+ movq %r13, (%rsp) # 8-byte Spill
+ movq 8(%rsi), %rsi
+ movq %rsi, -8(%rsp) # 8-byte Spill
+ mulq %rbx
+ movq %rdx, %r14
+ movq %rax, %r8
+ movq %r9, %rax
+ mulq %rbx
+ movq %rdx, %rdi
+ movq %rax, %r9
+ movq %r10, %rax
+ mulq %rbx
+ movq %rdx, %rbp
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, %rsi
+ movq %rax, %r11
+ movq %r13, %rax
+ mulq %rbx
+ movq %rax, -112(%rsp) # 8-byte Spill
+ addq %r11, %rdx
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ adcq %r10, %rsi
+ movq %rsi, -88(%rsp) # 8-byte Spill
+ adcq %r9, %rbp
+ movq %rbp, -80(%rsp) # 8-byte Spill
+ adcq %r8, %rdi
+ movq %rdi, -72(%rsp) # 8-byte Spill
+ adcq 80(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, -64(%rsp) # 8-byte Spill
+ adcq 88(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, -48(%rsp) # 8-byte Spill
+ adcq $0, %r15
+ movq %r15, -40(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, 32(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ imulq %rdx, %rdi
+ movq (%rcx), %r12
+ movq %r12, 40(%rsp) # 8-byte Spill
+ movq 48(%rcx), %rdx
+ movq %rdx, 64(%rsp) # 8-byte Spill
+ movq 40(%rcx), %r9
+ movq %r9, 88(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rbx
+ movq %rbx, 80(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rsi
+ movq %rsi, 72(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rbp
+ movq %rbp, 48(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq %r9
+ movq %rdx, %r14
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq %rbx
+ movq %rdx, %r11
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq %rsi
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq %rbp
+ movq %rdx, %r8
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rdx, %rbp
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq %r12
+ movq %rdx, %r12
+ addq %r9, %r12
+ adcq %r13, %rbp
+ adcq %r10, %r8
+ adcq %r15, %rbx
+ adcq -128(%rsp), %r11 # 8-byte Folded Reload
+ adcq -120(%rsp), %r14 # 8-byte Folded Reload
+ movq -56(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq -112(%rsp), %rax # 8-byte Folded Reload
+ adcq -104(%rsp), %r12 # 8-byte Folded Reload
+ adcq -88(%rsp), %rbp # 8-byte Folded Reload
+ adcq -80(%rsp), %r8 # 8-byte Folded Reload
+ adcq -72(%rsp), %rbx # 8-byte Folded Reload
+ adcq -64(%rsp), %r11 # 8-byte Folded Reload
+ adcq -48(%rsp), %r14 # 8-byte Folded Reload
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq 24(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdi
+ movq %rdi, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %rdx, %rdi
+ addq %r10, %rdi
+ adcq %r13, %r15
+ adcq -112(%rsp), %r9 # 8-byte Folded Reload
+ movq %rcx, %rdx
+ adcq -104(%rsp), %rdx # 8-byte Folded Reload
+ adcq -88(%rsp), %rsi # 8-byte Folded Reload
+ movq -48(%rsp), %rax # 8-byte Reload
+ adcq -80(%rsp), %rax # 8-byte Folded Reload
+ movq -40(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ movq -64(%rsp), %r10 # 8-byte Reload
+ addq %r12, %r10
+ movq %r10, -64(%rsp) # 8-byte Spill
+ adcq %rbp, %rdi
+ adcq %r8, %r15
+ adcq %rbx, %r9
+ adcq %r11, %rdx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ adcq %r14, %rsi
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ adcq -56(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -48(%rsp) # 8-byte Spill
+ adcq -72(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %r10, %rbp
+ imulq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r10
+ movq %rbp, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ addq %r14, %r11
+ adcq %r10, %rsi
+ adcq %rbx, %rcx
+ adcq -120(%rsp), %r13 # 8-byte Folded Reload
+ adcq -112(%rsp), %r12 # 8-byte Folded Reload
+ adcq -104(%rsp), %r8 # 8-byte Folded Reload
+ movq -72(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ addq -64(%rsp), %rax # 8-byte Folded Reload
+ adcq %rdi, %r11
+ adcq %r15, %rsi
+ adcq %r9, %rcx
+ adcq -88(%rsp), %r13 # 8-byte Folded Reload
+ adcq -80(%rsp), %r12 # 8-byte Folded Reload
+ adcq -48(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, -80(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, -72(%rsp) # 8-byte Spill
+ movq -56(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ movq 24(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdi
+ movq %rdi, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r8
+ addq %r14, %r8
+ adcq %r9, %rbx
+ adcq -120(%rsp), %r15 # 8-byte Folded Reload
+ movq -64(%rsp), %r9 # 8-byte Reload
+ adcq -112(%rsp), %r9 # 8-byte Folded Reload
+ movq -56(%rsp), %rdi # 8-byte Reload
+ adcq -104(%rsp), %rdi # 8-byte Folded Reload
+ movq -48(%rsp), %rdx # 8-byte Reload
+ adcq -88(%rsp), %rdx # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %r11, %r10
+ adcq %rsi, %r8
+ adcq %rcx, %rbx
+ adcq %r13, %r15
+ adcq %r12, %r9
+ movq %r9, -64(%rsp) # 8-byte Spill
+ adcq -80(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -56(%rsp) # 8-byte Spill
+ adcq -72(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ adcq %rbp, %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %r10, %rbp
+ imulq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 88(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rcx
+ movq %rbp, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r13
+ movq %rbp, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ addq %r12, %r14
+ adcq %r13, %rsi
+ adcq %rcx, %rdi
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq -104(%rsp), %rdx # 8-byte Folded Reload
+ movq -72(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq %r10, %rax
+ adcq %r8, %r14
+ adcq %rbx, %rsi
+ adcq %r15, %rdi
+ adcq -64(%rsp), %r9 # 8-byte Folded Reload
+ adcq -56(%rsp), %r11 # 8-byte Folded Reload
+ adcq -48(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -72(%rsp) # 8-byte Spill
+ adcq $0, -88(%rsp) # 8-byte Folded Spill
+ movq 24(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rcx
+ movq %rcx, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r13
+ addq %r12, %r13
+ adcq %r8, %rbp
+ adcq -120(%rsp), %rbx # 8-byte Folded Reload
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ movq -56(%rsp), %rdx # 8-byte Reload
+ adcq -104(%rsp), %rdx # 8-byte Folded Reload
+ movq -48(%rsp), %rcx # 8-byte Reload
+ adcq -64(%rsp), %rcx # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %r14, %r10
+ adcq %rsi, %r13
+ adcq %rdi, %rbp
+ adcq %r9, %rbx
+ adcq %r11, %r15
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ adcq -72(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -48(%rsp) # 8-byte Spill
+ adcq -88(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -40(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %r10, %rsi
+ imulq 32(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 88(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ addq %r11, %r14
+ adcq %r9, %r8
+ adcq -120(%rsp), %rcx # 8-byte Folded Reload
+ adcq -112(%rsp), %rdi # 8-byte Folded Reload
+ adcq -104(%rsp), %r12 # 8-byte Folded Reload
+ movq -80(%rsp), %rsi # 8-byte Reload
+ adcq -88(%rsp), %rsi # 8-byte Folded Reload
+ movq -72(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r10, %rax
+ adcq %r13, %r14
+ adcq %rbp, %r8
+ adcq %rbx, %rcx
+ adcq %r15, %rdi
+ adcq -56(%rsp), %r12 # 8-byte Folded Reload
+ adcq -48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq -64(%rsp), %r11 # 8-byte Reload
+ adcq $0, %r11
+ movq 24(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rbp
+ movq %rbp, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r15
+ movq %rbp, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rax, %rsi
+ movq %rdx, %r10
+ addq %r15, %r10
+ adcq %r9, %r13
+ adcq -120(%rsp), %rbx # 8-byte Folded Reload
+ movq -64(%rsp), %r15 # 8-byte Reload
+ adcq -112(%rsp), %r15 # 8-byte Folded Reload
+ movq -56(%rsp), %rbp # 8-byte Reload
+ adcq -104(%rsp), %rbp # 8-byte Folded Reload
+ movq -48(%rsp), %rdx # 8-byte Reload
+ adcq -88(%rsp), %rdx # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ movq %rsi, %r9
+ addq %r14, %r9
+ adcq %r8, %r10
+ adcq %rcx, %r13
+ adcq %rdi, %rbx
+ adcq %r12, %r15
+ movq %r15, -64(%rsp) # 8-byte Spill
+ adcq -80(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, -56(%rsp) # 8-byte Spill
+ adcq -72(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ adcq %r11, %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %r9, %rsi
+ movq %r9, %r11
+ imulq 32(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 88(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ addq %r15, %r14
+ adcq %r12, %rcx
+ adcq %rdi, %rbp
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq -112(%rsp), %r8 # 8-byte Folded Reload
+ movq -80(%rsp), %rsi # 8-byte Reload
+ adcq -104(%rsp), %rsi # 8-byte Folded Reload
+ movq -72(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r11, %rax
+ adcq %r10, %r14
+ adcq %r13, %rcx
+ adcq %rbx, %rbp
+ adcq -64(%rsp), %r9 # 8-byte Folded Reload
+ adcq -56(%rsp), %r8 # 8-byte Folded Reload
+ adcq -48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ adcq $0, -88(%rsp) # 8-byte Folded Spill
+ movq 24(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdi
+ movq %rdi, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rax, %rdi
+ movq %rdx, %r11
+ addq %r13, %r11
+ adcq %r15, %rsi
+ adcq %r10, %rbx
+ adcq -112(%rsp), %r12 # 8-byte Folded Reload
+ movq -56(%rsp), %r10 # 8-byte Reload
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ movq -48(%rsp), %rdx # 8-byte Reload
+ adcq -64(%rsp), %rdx # 8-byte Folded Reload
+ movq -40(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %r14, %rdi
+ adcq %rcx, %r11
+ adcq %rbp, %rsi
+ adcq %r9, %rbx
+ adcq %r8, %r12
+ adcq -80(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, -56(%rsp) # 8-byte Spill
+ adcq -72(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ adcq -88(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -40(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rdi, %rbp
+ imulq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 88(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ addq %r9, %r15
+ adcq %r8, %r13
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ adcq -112(%rsp), %rcx # 8-byte Folded Reload
+ adcq -104(%rsp), %r14 # 8-byte Folded Reload
+ movq -72(%rsp), %rdx # 8-byte Reload
+ adcq -88(%rsp), %rdx # 8-byte Folded Reload
+ movq -64(%rsp), %r8 # 8-byte Reload
+ adcq $0, %r8
+ addq %rdi, %rax
+ adcq %r11, %r15
+ adcq %rsi, %r13
+ adcq %rbx, %r10
+ adcq %r12, %rcx
+ adcq -56(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, -56(%rsp) # 8-byte Spill
+ adcq -48(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, -64(%rsp) # 8-byte Spill
+ movq -80(%rsp), %rsi # 8-byte Reload
+ adcq $0, %rsi
+ movq 24(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdi
+ movq %rdi, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, 16(%rsp) # 8-byte Spill
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, 24(%rsp) # 8-byte Spill
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r12
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ addq %rbp, %rdx
+ movq %rdx, %rbp
+ adcq %rbx, %r9
+ adcq %r12, %r14
+ movq %r8, %rdi
+ adcq -32(%rsp), %rdi # 8-byte Folded Reload
+ adcq 8(%rsp), %r11 # 8-byte Folded Reload
+ movq 24(%rsp), %rbx # 8-byte Reload
+ adcq -40(%rsp), %rbx # 8-byte Folded Reload
+ movq 16(%rsp), %r8 # 8-byte Reload
+ adcq $0, %r8
+ addq %r15, %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ adcq %r13, %rbp
+ movq %rbp, 8(%rsp) # 8-byte Spill
+ adcq %r10, %r9
+ movq %r9, (%rsp) # 8-byte Spill
+ adcq %rcx, %r14
+ movq %r14, -8(%rsp) # 8-byte Spill
+ adcq -56(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -16(%rsp) # 8-byte Spill
+ adcq -72(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, -24(%rsp) # 8-byte Spill
+ adcq -64(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 24(%rsp) # 8-byte Spill
+ adcq %rsi, %r8
+ movq %r8, 16(%rsp) # 8-byte Spill
+ sbbq %rcx, %rcx
+ movq 32(%rsp), %r10 # 8-byte Reload
+ imulq %rax, %r10
+ andl $1, %ecx
+ movq %r10, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq %r10, %rax
+ mulq 88(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %r10, %rax
+ mulq 80(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %r10, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %r10, %rax
+ movq 48(%rsp), %r13 # 8-byte Reload
+ mulq %r13
+ movq %rdx, %rbp
+ movq %rax, %r12
+ movq %r10, %rax
+ movq 40(%rsp), %r15 # 8-byte Reload
+ mulq %r15
+ movq %rdx, %r11
+ movq %rax, %r8
+ movq %r10, %rax
+ movq 56(%rsp), %r14 # 8-byte Reload
+ mulq %r14
+ addq %r11, %rax
+ adcq %r12, %rdx
+ adcq -56(%rsp), %rbp # 8-byte Folded Reload
+ adcq -48(%rsp), %rsi # 8-byte Folded Reload
+ adcq -40(%rsp), %rdi # 8-byte Folded Reload
+ adcq 32(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %rbx
+ addq -32(%rsp), %r8 # 8-byte Folded Reload
+ adcq 8(%rsp), %rax # 8-byte Folded Reload
+ adcq (%rsp), %rdx # 8-byte Folded Reload
+ adcq -8(%rsp), %rbp # 8-byte Folded Reload
+ adcq -16(%rsp), %rsi # 8-byte Folded Reload
+ adcq -24(%rsp), %rdi # 8-byte Folded Reload
+ adcq 24(%rsp), %r9 # 8-byte Folded Reload
+ adcq 16(%rsp), %rbx # 8-byte Folded Reload
+ adcq $0, %rcx
+ movq %rax, %r8
+ subq %r15, %r8
+ movq %rdx, %r10
+ sbbq %r14, %r10
+ movq %rbp, %r11
+ sbbq %r13, %r11
+ movq %rsi, %r14
+ sbbq 72(%rsp), %r14 # 8-byte Folded Reload
+ movq %rdi, %r15
+ sbbq 80(%rsp), %r15 # 8-byte Folded Reload
+ movq %r9, %r12
+ sbbq 88(%rsp), %r12 # 8-byte Folded Reload
+ movq %rbx, %r13
+ sbbq 64(%rsp), %r13 # 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rbx, %r13
+ testb %cl, %cl
+ cmovneq %rax, %r8
+ movq -96(%rsp), %rax # 8-byte Reload
+ movq %r8, (%rax)
+ cmovneq %rdx, %r10
+ movq %r10, 8(%rax)
+ cmovneq %rbp, %r11
+ movq %r11, 16(%rax)
+ cmovneq %rsi, %r14
+ movq %r14, 24(%rax)
+ cmovneq %rdi, %r15
+ movq %r15, 32(%rax)
+ cmovneq %r9, %r12
+ movq %r12, 40(%rax)
+ movq %r13, 48(%rax)
+ addq $96, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end98:
+ .size mcl_fp_mont7L, .Lfunc_end98-mcl_fp_mont7L
+
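+# mcl_fp_montNF7L(z, x, y, p): computes the same 7-limb Montgomery product as
+# mcl_fp_mont7L, but no per-pass carry word is captured (no sbbq/andl $1
+# sequence between passes); the final selection between the accumulator and
+# accumulator minus p is driven by the sign of the top limb (sarq/cmovsq)
+# rather than by a carry-out flag.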
+ .globl mcl_fp_montNF7L
+ .align 16, 0x90
+ .type mcl_fp_montNF7L,@function
+mcl_fp_montNF7L: # @mcl_fp_montNF7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $80, %rsp
+ movq %rdx, 16(%rsp) # 8-byte Spill
+ movq %rdi, -96(%rsp) # 8-byte Spill
+ movq 48(%rsi), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq (%rdx), %rbp
+ mulq %rbp
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq %rdx, %r9
+ movq 40(%rsi), %rax
+ movq %rax, (%rsp) # 8-byte Spill
+ mulq %rbp
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq %rdx, %r11
+ movq 32(%rsi), %rax
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq 24(%rsi), %r8
+ movq %r8, -40(%rsp) # 8-byte Spill
+ movq 16(%rsi), %rbx
+ movq %rbx, -32(%rsp) # 8-byte Spill
+ movq (%rsi), %r10
+ movq %r10, -16(%rsp) # 8-byte Spill
+ movq 8(%rsi), %rsi
+ movq %rsi, -24(%rsp) # 8-byte Spill
+ mulq %rbp
+ movq %rdx, %rdi
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq %r8, %rax
+ mulq %rbp
+ movq %rdx, %r14
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rdx, %rbx
+ movq %rax, %r13
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %rsi
+ movq %rax, %r12
+ movq %r10, %rax
+ mulq %rbp
+ movq %rdx, %r8
+ addq %r12, %r8
+ adcq %r13, %rsi
+ movq %rsi, -104(%rsp) # 8-byte Spill
+ adcq %r15, %rbx
+ movq %rbx, -88(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, %r12
+ adcq 64(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -80(%rsp) # 8-byte Spill
+ adcq 72(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, -56(%rsp) # 8-byte Spill
+ adcq $0, %r9
+ movq %r9, -64(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, 24(%rsp) # 8-byte Spill
+ movq %rax, %r9
+ movq %rax, %r14
+ imulq %rdx, %r9
+ movq (%rcx), %r11
+ movq %r11, 32(%rsp) # 8-byte Spill
+ movq 48(%rcx), %rdx
+ movq %rdx, 72(%rsp) # 8-byte Spill
+ movq 40(%rcx), %r10
+ movq %r10, 64(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rbp
+ movq %rbp, 56(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rdi
+ movq %rdi, 40(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rsi
+ movq %rsi, -8(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq %rdx
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq %r10
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %r9, %rax
+ mulq %rbp
+ movq %rdx, -128(%rsp) # 8-byte Spill
+ movq %rax, %r13
+ movq %r9, %rax
+ mulq %rbx
+ movq %rdx, %rbx
+ movq %rax, %rbp
+ movq %r9, %rax
+ mulq %rdi
+ movq %rdx, %rcx
+ movq %rax, %rdi
+ movq %r9, %rax
+ mulq %rsi
+ movq %rdx, %r10
+ movq %rax, %rsi
+ movq %r9, %rax
+ mulq %r11
+ addq %r14, %rax
+ adcq %r8, %rsi
+ adcq -104(%rsp), %rdi # 8-byte Folded Reload
+ adcq -88(%rsp), %rbp # 8-byte Folded Reload
+ adcq %r12, %r13
+ adcq -80(%rsp), %r15 # 8-byte Folded Reload
+ movq -72(%rsp), %r8 # 8-byte Reload
+ adcq -56(%rsp), %r8 # 8-byte Folded Reload
+ movq -64(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rsi
+ adcq %r10, %rdi
+ adcq %rcx, %rbp
+ adcq %rbx, %r13
+ adcq -128(%rsp), %r15 # 8-byte Folded Reload
+ adcq -120(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, -72(%rsp) # 8-byte Spill
+ adcq -112(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rbx
+ movq %rbx, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r12
+ addq %r14, %r12
+ adcq -128(%rsp), %rcx # 8-byte Folded Reload
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq -112(%rsp), %r8 # 8-byte Folded Reload
+ adcq -104(%rsp), %r11 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq -88(%rsp), %rdx # 8-byte Folded Reload
+ movq -56(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rsi, %r10
+ adcq %rdi, %r12
+ adcq %rbp, %rcx
+ adcq %r13, %r9
+ adcq %r15, %r8
+ adcq -72(%rsp), %r11 # 8-byte Folded Reload
+ adcq -64(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %r10, %rbx
+ imulq 24(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -128(%rsp) # 8-byte Spill
+ movq %rax, %rdi
+ movq %rbx, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ addq %r10, %rax
+ adcq %r12, %rsi
+ adcq %rcx, %rbp
+ adcq %r9, %rdi
+ adcq %r8, %r14
+ movq -72(%rsp), %rcx # 8-byte Reload
+ adcq %r11, %rcx
+ movq -64(%rsp), %r8 # 8-byte Reload
+ adcq -80(%rsp), %r8 # 8-byte Folded Reload
+ movq -56(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rsi
+ adcq %r13, %rbp
+ adcq %r15, %rdi
+ movq %rdi, -88(%rsp) # 8-byte Spill
+ adcq -128(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, -80(%rsp) # 8-byte Spill
+ adcq -120(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -72(%rsp) # 8-byte Spill
+ adcq -112(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, -64(%rsp) # 8-byte Spill
+ adcq -104(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rcx
+ movq %rcx, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rax, %r13
+ movq %rdx, %rcx
+ addq %r11, %rcx
+ adcq %r9, %r15
+ adcq %r12, %rbx
+ adcq -120(%rsp), %rdi # 8-byte Folded Reload
+ adcq -112(%rsp), %r14 # 8-byte Folded Reload
+ adcq -104(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r8
+ addq %rsi, %r13
+ adcq %rbp, %rcx
+ adcq -88(%rsp), %r15 # 8-byte Folded Reload
+ adcq -80(%rsp), %rbx # 8-byte Folded Reload
+ adcq -72(%rsp), %rdi # 8-byte Folded Reload
+ adcq -64(%rsp), %r14 # 8-byte Folded Reload
+ adcq -56(%rsp), %r10 # 8-byte Folded Reload
+ adcq $0, %r8
+ movq %r13, %r9
+ imulq 24(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %r9, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %r9, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, %r11
+ movq %r9, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %r9, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ movq %r9, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ addq %r13, %rax
+ adcq %rcx, %rsi
+ adcq %r15, %r12
+ adcq %rbx, %r11
+ adcq %rdi, %rbp
+ movq -72(%rsp), %rcx # 8-byte Reload
+ adcq %r14, %rcx
+ movq -64(%rsp), %rax # 8-byte Reload
+ adcq %r10, %rax
+ adcq $0, %r8
+ addq %rdx, %rsi
+ adcq -120(%rsp), %r12 # 8-byte Folded Reload
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ adcq -104(%rsp), %rbp # 8-byte Folded Reload
+ adcq -88(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -72(%rsp) # 8-byte Spill
+ adcq -80(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -64(%rsp) # 8-byte Spill
+ adcq -56(%rsp), %r8 # 8-byte Folded Reload
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdi
+ movq %rdi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rax, %r14
+ movq %rdx, %rdi
+ addq %r9, %rdi
+ adcq %rbx, %rcx
+ adcq -120(%rsp), %r10 # 8-byte Folded Reload
+ adcq -112(%rsp), %r13 # 8-byte Folded Reload
+ adcq -104(%rsp), %r15 # 8-byte Folded Reload
+ movq -88(%rsp), %rdx # 8-byte Reload
+ adcq -80(%rsp), %rdx # 8-byte Folded Reload
+ movq -56(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rsi, %r14
+ adcq %r12, %rdi
+ adcq %r11, %rcx
+ adcq %rbp, %r10
+ adcq -72(%rsp), %r13 # 8-byte Folded Reload
+ adcq -64(%rsp), %r15 # 8-byte Folded Reload
+ adcq %r8, %rdx
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %r14, %rsi
+ imulq 24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r8
+ movq %rsi, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ addq %r14, %rax
+ adcq %rdi, %rbx
+ adcq %rcx, %rbp
+ adcq %r10, %r8
+ adcq %r13, %r12
+ movq -80(%rsp), %rsi # 8-byte Reload
+ adcq %r15, %rsi
+ movq -72(%rsp), %rcx # 8-byte Reload
+ adcq -88(%rsp), %rcx # 8-byte Folded Reload
+ movq -56(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rbx
+ adcq %r9, %rbp
+ adcq %r11, %r8
+ adcq -120(%rsp), %r12 # 8-byte Folded Reload
+ adcq -112(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -80(%rsp) # 8-byte Spill
+ adcq -104(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -72(%rsp) # 8-byte Spill
+ adcq -64(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rsi
+ movq %rsi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rsi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rax, %r11
+ movq %rdx, %r10
+ addq %r15, %r10
+ adcq %r14, %rdi
+ adcq -128(%rsp), %rcx # 8-byte Folded Reload
+ adcq -120(%rsp), %r9 # 8-byte Folded Reload
+ adcq -112(%rsp), %r13 # 8-byte Folded Reload
+ movq -88(%rsp), %rdx # 8-byte Reload
+ adcq -104(%rsp), %rdx # 8-byte Folded Reload
+ movq -64(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rbx, %r11
+ adcq %rbp, %r10
+ adcq %r8, %rdi
+ adcq %r12, %rcx
+ adcq -80(%rsp), %r9 # 8-byte Folded Reload
+ adcq -72(%rsp), %r13 # 8-byte Folded Reload
+ adcq -56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %r11, %rsi
+ imulq 24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r14
+ movq %rsi, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ addq %r11, %rax
+ adcq %r10, %rbx
+ adcq %rdi, %rbp
+ adcq %rcx, %r12
+ adcq %r9, %r14
+ movq -72(%rsp), %rdi # 8-byte Reload
+ adcq %r13, %rdi
+ movq -56(%rsp), %rcx # 8-byte Reload
+ adcq -88(%rsp), %rcx # 8-byte Folded Reload
+ movq -64(%rsp), %rax # 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rbx
+ adcq %r8, %rbp
+ adcq %r15, %r12
+ adcq -120(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, -88(%rsp) # 8-byte Spill
+ adcq -112(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -72(%rsp) # 8-byte Spill
+ adcq -104(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ adcq -80(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rcx
+ movq %rcx, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -104(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rax, %r11
+ movq %rdx, %r10
+ addq %r14, %r10
+ adcq %r15, %r8
+ adcq -128(%rsp), %rdi # 8-byte Folded Reload
+ adcq -120(%rsp), %rsi # 8-byte Folded Reload
+ adcq -112(%rsp), %r13 # 8-byte Folded Reload
+ movq -80(%rsp), %rax # 8-byte Reload
+ adcq -104(%rsp), %rax # 8-byte Folded Reload
+ adcq $0, %r9
+ addq %rbx, %r11
+ adcq %rbp, %r10
+ adcq %r12, %r8
+ adcq -88(%rsp), %rdi # 8-byte Folded Reload
+ adcq -72(%rsp), %rsi # 8-byte Folded Reload
+ adcq -56(%rsp), %r13 # 8-byte Folded Reload
+ adcq -64(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -80(%rsp) # 8-byte Spill
+ adcq $0, %r9
+ movq %r11, %rbx
+ imulq 24(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, -56(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, -104(%rsp) # 8-byte Spill
+ movq %rax, -64(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -112(%rsp) # 8-byte Spill
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -120(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq -8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rcx
+ movq %rbx, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ addq %r11, %rax
+ adcq %r10, %rcx
+ adcq %r8, %rbp
+ adcq %rdi, %r15
+ adcq %rsi, %r12
+ movq -64(%rsp), %rsi # 8-byte Reload
+ adcq %r13, %rsi
+ movq -56(%rsp), %rax # 8-byte Reload
+ adcq -80(%rsp), %rax # 8-byte Folded Reload
+ adcq $0, %r9
+ addq %rdx, %rcx
+ adcq %r14, %rbp
+ adcq -120(%rsp), %r15 # 8-byte Folded Reload
+ adcq -72(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, -72(%rsp) # 8-byte Spill
+ adcq -112(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -64(%rsp) # 8-byte Spill
+ adcq -104(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -56(%rsp) # 8-byte Spill
+ adcq -88(%rsp), %r9 # 8-byte Folded Reload
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdi
+ movq %rdi, %rax
+ mulq 8(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, 16(%rsp) # 8-byte Spill
+ movq %rax, (%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -48(%rsp) # 8-byte Folded Reload
+ movq %rdx, 8(%rsp) # 8-byte Spill
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %rsi
+ movq %rdi, %rax
+ mulq -24(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rax, %r12
+ movq %rdx, %r8
+ addq %rbx, %r8
+ adcq %rsi, %r10
+ adcq -40(%rsp), %r11 # 8-byte Folded Reload
+ adcq -48(%rsp), %r13 # 8-byte Folded Reload
+ movq 8(%rsp), %rdx # 8-byte Reload
+ adcq (%rsp), %rdx # 8-byte Folded Reload
+ movq 16(%rsp), %rax # 8-byte Reload
+ adcq -80(%rsp), %rax # 8-byte Folded Reload
+ adcq $0, %r14
+ addq %rcx, %r12
+ adcq %rbp, %r8
+ adcq %r15, %r10
+ adcq -72(%rsp), %r11 # 8-byte Folded Reload
+ adcq -64(%rsp), %r13 # 8-byte Folded Reload
+ adcq -56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 8(%rsp) # 8-byte Spill
+ adcq %r9, %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq $0, %r14
+ movq 24(%rsp), %rdi # 8-byte Reload
+ imulq %r12, %rdi
+ movq %rdi, %rax
+ mulq 72(%rsp) # 8-byte Folded Reload
+ movq %rdx, 24(%rsp) # 8-byte Spill
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq 64(%rsp) # 8-byte Folded Reload
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rax, %rsi
+ movq %rdi, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, %rcx
+ movq %rdi, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, %rbx
+ movq %rdi, %rax
+ movq -8(%rsp), %rdi # 8-byte Reload
+ mulq %rdi
+ addq %r12, %r15
+ adcq %r8, %rax
+ adcq %r10, %rbx
+ adcq %r11, %rcx
+ adcq %r13, %rsi
+ adcq 8(%rsp), %rbp # 8-byte Folded Reload
+ adcq 16(%rsp), %r9 # 8-byte Folded Reload
+ adcq $0, %r14
+ addq -32(%rsp), %rax # 8-byte Folded Reload
+ adcq %rdx, %rbx
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ adcq -24(%rsp), %rsi # 8-byte Folded Reload
+ adcq -16(%rsp), %rbp # 8-byte Folded Reload
+ adcq (%rsp), %r9 # 8-byte Folded Reload
+ adcq 24(%rsp), %r14 # 8-byte Folded Reload
+ movq %rax, %r13
+ subq 32(%rsp), %r13 # 8-byte Folded Reload
+ movq %rbx, %r12
+ sbbq %rdi, %r12
+ movq %rcx, %r8
+ sbbq 40(%rsp), %r8 # 8-byte Folded Reload
+ movq %rsi, %r10
+ sbbq 48(%rsp), %r10 # 8-byte Folded Reload
+ movq %rbp, %r11
+ sbbq 56(%rsp), %r11 # 8-byte Folded Reload
+ movq %r9, %r15
+ sbbq 64(%rsp), %r15 # 8-byte Folded Reload
+ movq %r14, %rdx
+ sbbq 72(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, %rdi
+ sarq $63, %rdi
+ cmovsq %rax, %r13
+ movq -96(%rsp), %rax # 8-byte Reload
+ movq %r13, (%rax)
+ cmovsq %rbx, %r12
+ movq %r12, 8(%rax)
+ cmovsq %rcx, %r8
+ movq %r8, 16(%rax)
+ cmovsq %rsi, %r10
+ movq %r10, 24(%rax)
+ cmovsq %rbp, %r11
+ movq %r11, 32(%rax)
+ cmovsq %r9, %r15
+ movq %r15, 40(%rax)
+ cmovsq %r14, %rdx
+ movq %rdx, 48(%rax)
+ addq $80, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end99:
+ .size mcl_fp_montNF7L, .Lfunc_end99-mcl_fp_montNF7L
+
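+# mcl_fp_montRed7L(z, T, p): Montgomery reduction of the 14-limb input T to
+# z = T*R^{-1} mod p with R = 2^448. Each of the seven passes multiplies the
+# current low word by the constant read from p[-1] (-8(%rdx), spilled to
+# 8(%rsp)), which appears to be -p^{-1} mod 2^64, then adds that multiple of
+# p to clear one limb; a final conditional subtraction of p yields the result.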
+ .globl mcl_fp_montRed7L
+ .align 16, 0x90
+ .type mcl_fp_montRed7L,@function
+mcl_fp_montRed7L: # @mcl_fp_montRed7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $64, %rsp
+ movq %rdx, %rcx
+ movq %rdi, -104(%rsp) # 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq (%rcx), %rdx
+ movq %rdx, 32(%rsp) # 8-byte Spill
+ movq (%rsi), %rbp
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ imulq %rax, %rbp
+ movq 48(%rcx), %rdx
+ movq %rdx, -16(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq 40(%rcx), %rdx
+ movq %rdx, (%rsp) # 8-byte Spill
+ movq 32(%rcx), %r10
+ movq %r10, 56(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rdi
+ movq %rdi, 48(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, 16(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rdx, %r13
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq %r10
+ movq %rdx, %r15
+ movq %rax, %r11
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rdx, %r10
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %r14
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, %r12
+ movq %rax, %rdi
+ movq %rbp, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ addq %rdi, %rbp
+ adcq %rbx, %r12
+ adcq %r8, %r14
+ adcq %r11, %r10
+ adcq %r9, %r15
+ adcq -8(%rsp), %r13 # 8-byte Folded Reload
+ movq -48(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq 24(%rsp), %rax # 8-byte Folded Reload
+ adcq 8(%rsi), %rbp
+ adcq 16(%rsi), %r12
+ adcq 24(%rsi), %r14
+ adcq 32(%rsi), %r10
+ adcq 40(%rsi), %r15
+ adcq 48(%rsi), %r13
+ movq %r13, -80(%rsp) # 8-byte Spill
+ adcq 56(%rsi), %rdx
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq 104(%rsi), %r8
+ movq 96(%rsi), %rdx
+ movq 88(%rsi), %rdi
+ movq 80(%rsi), %rbx
+ movq 72(%rsi), %rax
+ movq 64(%rsi), %rsi
+ adcq $0, %rsi
+ movq %rsi, -88(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -96(%rsp) # 8-byte Spill
+ adcq $0, %rbx
+ movq %rbx, -40(%rsp) # 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -32(%rsp) # 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, -8(%rsp) # 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ movq %rbp, %rdi
+ imulq 8(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -128(%rsp) # 8-byte Spill
+ movq %rdi, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rdx, %r8
+ movq %rax, %rcx
+ movq %rdi, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ addq %rcx, %r9
+ adcq %r11, %r8
+ adcq %rbx, %rsi
+ adcq -128(%rsp), %r13 # 8-byte Folded Reload
+ movq -72(%rsp), %rdi # 8-byte Reload
+ adcq -120(%rsp), %rdi # 8-byte Folded Reload
+ movq -64(%rsp), %rdx # 8-byte Reload
+ adcq -112(%rsp), %rdx # 8-byte Folded Reload
+ movq -56(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq %rbp, %rax
+ adcq %r12, %r9
+ adcq %r14, %r8
+ adcq %r10, %rsi
+ adcq %r15, %r13
+ adcq -80(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, -72(%rsp) # 8-byte Spill
+ adcq -48(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ adcq -88(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -56(%rsp) # 8-byte Spill
+ adcq $0, -96(%rsp) # 8-byte Folded Spill
+ adcq $0, -40(%rsp) # 8-byte Folded Spill
+ adcq $0, -32(%rsp) # 8-byte Folded Spill
+ adcq $0, -24(%rsp) # 8-byte Folded Spill
+ movq -8(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, 24(%rsp) # 8-byte Folded Spill
+ movq %r9, %rcx
+ imulq 8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -88(%rsp) # 8-byte Spill
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ addq %r14, %r10
+ adcq %r12, %rdi
+ adcq %r11, %rbp
+ adcq -120(%rsp), %r15 # 8-byte Folded Reload
+ movq -88(%rsp), %r11 # 8-byte Reload
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ movq -80(%rsp), %rdx # 8-byte Reload
+ adcq -8(%rsp), %rdx # 8-byte Folded Reload
+ movq -48(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq %r9, %rax
+ adcq %r8, %r10
+ adcq %rsi, %rdi
+ adcq %r13, %rbp
+ adcq -72(%rsp), %r15 # 8-byte Folded Reload
+ adcq -64(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, -88(%rsp) # 8-byte Spill
+ adcq -56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -80(%rsp) # 8-byte Spill
+ adcq -96(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -48(%rsp) # 8-byte Spill
+ adcq $0, -40(%rsp) # 8-byte Folded Spill
+ adcq $0, -32(%rsp) # 8-byte Folded Spill
+ adcq $0, -24(%rsp) # 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, -8(%rsp) # 8-byte Spill
+ adcq $0, 24(%rsp) # 8-byte Folded Spill
+ movq %r10, %rbx
+ imulq 8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %rax
+ movq -16(%rsp), %r12 # 8-byte Reload
+ mulq %r12
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -112(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -72(%rsp) # 8-byte Spill
+ movq %rax, -120(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ addq %r11, %r9
+ adcq %r13, %rcx
+ adcq %r14, %rsi
+ adcq -120(%rsp), %r8 # 8-byte Folded Reload
+ movq -72(%rsp), %r11 # 8-byte Reload
+ adcq -112(%rsp), %r11 # 8-byte Folded Reload
+ movq -64(%rsp), %rbx # 8-byte Reload
+ adcq -96(%rsp), %rbx # 8-byte Folded Reload
+ movq -56(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r10, %rax
+ adcq %rdi, %r9
+ adcq %rbp, %rcx
+ adcq %r15, %rsi
+ adcq -88(%rsp), %r8 # 8-byte Folded Reload
+ adcq -80(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, -72(%rsp) # 8-byte Spill
+ adcq -48(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, -64(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ adcq $0, -32(%rsp) # 8-byte Folded Spill
+ adcq $0, -24(%rsp) # 8-byte Folded Spill
+ adcq $0, -8(%rsp) # 8-byte Folded Spill
+ adcq $0, 24(%rsp) # 8-byte Folded Spill
+ movq %r9, %rbp
+ imulq 8(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq %r12
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq (%rsp) # 8-byte Folded Reload
+ movq %rdx, -48(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r11
+ movq %rbp, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r12
+ movq %rbp, %rax
+ movq 32(%rsp), %rbp # 8-byte Reload
+ mulq %rbp
+ movq %rdx, %r10
+ addq %r12, %r10
+ adcq %r11, %rbx
+ adcq %r14, %rdi
+ adcq -96(%rsp), %r13 # 8-byte Folded Reload
+ adcq -88(%rsp), %r15 # 8-byte Folded Reload
+ movq -48(%rsp), %r11 # 8-byte Reload
+ adcq -80(%rsp), %r11 # 8-byte Folded Reload
+ movq -40(%rsp), %rdx # 8-byte Reload
+ adcq $0, %rdx
+ addq %r9, %rax
+ adcq %rcx, %r10
+ adcq %rsi, %rbx
+ adcq %r8, %rdi
+ adcq -72(%rsp), %r13 # 8-byte Folded Reload
+ adcq -64(%rsp), %r15 # 8-byte Folded Reload
+ adcq -56(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, -48(%rsp) # 8-byte Spill
+ adcq -32(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -40(%rsp) # 8-byte Spill
+ adcq $0, -24(%rsp) # 8-byte Folded Spill
+ adcq $0, -8(%rsp) # 8-byte Folded Spill
+ adcq $0, 24(%rsp) # 8-byte Folded Spill
+ movq %r10, %rsi
+ imulq 8(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, -72(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ movq (%rsp), %r8 # 8-byte Reload
+ mulq %r8
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ movq %rax, -80(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, -64(%rsp) # 8-byte Spill
+ movq %rax, -88(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -96(%rsp) # 8-byte Spill
+ movq %rsi, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rcx
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %rbp
+ addq %rcx, %rbp
+ adcq %r11, %r14
+ adcq -96(%rsp), %r9 # 8-byte Folded Reload
+ adcq -88(%rsp), %r12 # 8-byte Folded Reload
+ movq -64(%rsp), %rsi # 8-byte Reload
+ adcq -80(%rsp), %rsi # 8-byte Folded Reload
+ movq -56(%rsp), %rdx # 8-byte Reload
+ adcq -72(%rsp), %rdx # 8-byte Folded Reload
+ movq -32(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ addq %r10, %rax
+ adcq %rbx, %rbp
+ adcq %rdi, %r14
+ adcq %r13, %r9
+ adcq %r15, %r12
+ adcq -48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, -64(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -56(%rsp) # 8-byte Spill
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -32(%rsp) # 8-byte Spill
+ adcq $0, -8(%rsp) # 8-byte Folded Spill
+ adcq $0, 24(%rsp) # 8-byte Folded Spill
+ movq 8(%rsp), %rcx # 8-byte Reload
+ imulq %rbp, %rcx
+ movq %rcx, %rax
+ mulq -16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq %r8
+ movq %rdx, %r13
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 56(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 48(%rsp) # 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -48(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 40(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq 16(%rsp) # 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 32(%rsp) # 8-byte Folded Reload
+ addq %r11, %rdx
+ adcq %r8, %rbx
+ adcq -48(%rsp), %rdi # 8-byte Folded Reload
+ adcq -40(%rsp), %r10 # 8-byte Folded Reload
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ adcq 8(%rsp), %r13 # 8-byte Folded Reload
+ adcq $0, %rsi
+ addq %rbp, %rax
+ adcq %r14, %rdx
+ adcq %r9, %rbx
+ adcq %r12, %rdi
+ adcq -64(%rsp), %r10 # 8-byte Folded Reload
+ adcq -56(%rsp), %r15 # 8-byte Folded Reload
+ adcq -32(%rsp), %r13 # 8-byte Folded Reload
+ adcq -8(%rsp), %rsi # 8-byte Folded Reload
+ movq 24(%rsp), %rcx # 8-byte Reload
+ adcq $0, %rcx
+ movq %rdx, %rax
+ subq 32(%rsp), %rax # 8-byte Folded Reload
+ movq %rbx, %rbp
+ sbbq 16(%rsp), %rbp # 8-byte Folded Reload
+ movq %rdi, %r8
+ sbbq 40(%rsp), %r8 # 8-byte Folded Reload
+ movq %r10, %r9
+ sbbq 48(%rsp), %r9 # 8-byte Folded Reload
+ movq %r15, %r11
+ sbbq 56(%rsp), %r11 # 8-byte Folded Reload
+ movq %r13, %r14
+ sbbq (%rsp), %r14 # 8-byte Folded Reload
+ movq %rsi, %r12
+ sbbq -16(%rsp), %r12 # 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rsi, %r12
+ testb %cl, %cl
+ cmovneq %rdx, %rax
+ movq -104(%rsp), %rcx # 8-byte Reload
+ movq %rax, (%rcx)
+ cmovneq %rbx, %rbp
+ movq %rbp, 8(%rcx)
+ cmovneq %rdi, %r8
+ movq %r8, 16(%rcx)
+ cmovneq %r10, %r9
+ movq %r9, 24(%rcx)
+ cmovneq %r15, %r11
+ movq %r11, 32(%rcx)
+ cmovneq %r13, %r14
+ movq %r14, 40(%rcx)
+ movq %r12, 48(%rcx)
+ addq $64, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end100:
+ .size mcl_fp_montRed7L, .Lfunc_end100-mcl_fp_montRed7L
+
+ .globl mcl_fp_addPre7L
+ .align 16, 0x90
+ .type mcl_fp_addPre7L,@function
+mcl_fp_addPre7L: # @mcl_fp_addPre7L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r8
+ movq 48(%rsi), %r14
+ movq 40(%rdx), %r9
+ movq 40(%rsi), %r15
+ movq 32(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r12
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rbx
+ adcq 16(%rsi), %r12
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r12, 16(%rdi)
+ adcq %r11, %rax
+ movq %rax, 24(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r9, %r15
+ movq %r15, 40(%rdi)
+ adcq %r8, %r14
+ movq %r14, 48(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end101:
+ .size mcl_fp_addPre7L, .Lfunc_end101-mcl_fp_addPre7L
+
+ .globl mcl_fp_subPre7L
+ .align 16, 0x90
+ .type mcl_fp_subPre7L,@function
+mcl_fp_subPre7L: # @mcl_fp_subPre7L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r8
+ movq 48(%rsi), %r10
+ movq 40(%rdx), %r9
+ movq 40(%rsi), %r15
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %r12
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %r12
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq 32(%rsi), %rdx
+ movq 24(%rsi), %rsi
+ movq %rbx, (%rdi)
+ movq %r12, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ sbbq %r14, %rdx
+ movq %rdx, 32(%rdi)
+ sbbq %r9, %r15
+ movq %r15, 40(%rdi)
+ sbbq %r8, %r10
+ movq %r10, 48(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end102:
+ .size mcl_fp_subPre7L, .Lfunc_end102-mcl_fp_subPre7L
+
+ .globl mcl_fp_shr1_7L
+ .align 16, 0x90
+ .type mcl_fp_shr1_7L,@function
+mcl_fp_shr1_7L: # @mcl_fp_shr1_7L
+# BB#0:
+ movq 48(%rsi), %r8
+ movq 40(%rsi), %r9
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %rax
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rdx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rdx
+ movq %rdx, (%rdi)
+ shrdq $1, %rcx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rax, %rcx
+ movq %rcx, 16(%rdi)
+ shrdq $1, %r10, %rax
+ movq %rax, 24(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 32(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 40(%rdi)
+ shrq %r8
+ movq %r8, 48(%rdi)
+ retq
+.Lfunc_end103:
+ .size mcl_fp_shr1_7L, .Lfunc_end103-mcl_fp_shr1_7L
+
+ .globl mcl_fp_add7L
+ .align 16, 0x90
+ .type mcl_fp_add7L,@function
+mcl_fp_add7L: # @mcl_fp_add7L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r14
+ movq 48(%rsi), %r8
+ movq 40(%rdx), %r15
+ movq 40(%rsi), %r9
+ movq 32(%rdx), %r12
+ movq 24(%rdx), %r13
+ movq 16(%rdx), %r10
+ movq (%rdx), %r11
+ movq 8(%rdx), %rdx
+ addq (%rsi), %r11
+ adcq 8(%rsi), %rdx
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rbx
+ adcq 16(%rsi), %r10
+ movq %r11, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ adcq %r13, %rax
+ movq %rax, 24(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r15, %r9
+ movq %r9, 40(%rdi)
+ adcq %r14, %r8
+ movq %r8, 48(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %r11
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r10
+ sbbq 24(%rcx), %rax
+ sbbq 32(%rcx), %rbx
+ sbbq 40(%rcx), %r9
+ sbbq 48(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne .LBB104_2
+# BB#1: # %nocarry
+ movq %r11, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %rax, 24(%rdi)
+ movq %rbx, 32(%rdi)
+ movq %r9, 40(%rdi)
+ movq %r8, 48(%rdi)
+.LBB104_2: # %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end104:
+ .size mcl_fp_add7L, .Lfunc_end104-mcl_fp_add7L
+
+ .globl mcl_fp_addNF7L
+ .align 16, 0x90
+ .type mcl_fp_addNF7L,@function
+mcl_fp_addNF7L: # @mcl_fp_addNF7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rbp
+ movq 32(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r14
+ movq (%rdx), %r12
+ movq 8(%rdx), %r15
+ addq (%rsi), %r12
+ adcq 8(%rsi), %r15
+ adcq 16(%rsi), %r14
+ adcq 24(%rsi), %r11
+ adcq 32(%rsi), %r10
+ adcq 40(%rsi), %rbp
+ movq %rbp, -8(%rsp) # 8-byte Spill
+ adcq 48(%rsi), %r9
+ movq %r12, %rsi
+ subq (%rcx), %rsi
+ movq %r15, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r14, %rax
+ sbbq 16(%rcx), %rax
+ movq %r11, %rbx
+ sbbq 24(%rcx), %rbx
+ movq %r10, %r13
+ sbbq 32(%rcx), %r13
+ sbbq 40(%rcx), %rbp
+ movq %r9, %r8
+ sbbq 48(%rcx), %r8
+ movq %r8, %rcx
+ sarq $63, %rcx
+ cmovsq %r12, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r15, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r14, %rax
+ movq %rax, 16(%rdi)
+ cmovsq %r11, %rbx
+ movq %rbx, 24(%rdi)
+ cmovsq %r10, %r13
+ movq %r13, 32(%rdi)
+ cmovsq -8(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 40(%rdi)
+ cmovsq %r9, %r8
+ movq %r8, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end105:
+ .size mcl_fp_addNF7L, .Lfunc_end105-mcl_fp_addNF7L
+
+ .globl mcl_fp_sub7L
+ .align 16, 0x90
+ .type mcl_fp_sub7L,@function
+mcl_fp_sub7L: # @mcl_fp_sub7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r14
+ movq 48(%rsi), %r8
+ movq 40(%rdx), %r15
+ movq 40(%rsi), %r9
+ movq 32(%rdx), %r12
+ movq (%rsi), %rax
+ movq 8(%rsi), %r11
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r11
+ movq 16(%rsi), %r13
+ sbbq 16(%rdx), %r13
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %rsi
+ sbbq 24(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r11, 8(%rdi)
+ movq %r13, 16(%rdi)
+ movq %rsi, 24(%rdi)
+ sbbq %r12, %r10
+ movq %r10, 32(%rdi)
+ sbbq %r15, %r9
+ movq %r9, 40(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 48(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB106_2
+# BB#1: # %carry
+ movq 48(%rcx), %r14
+ movq 40(%rcx), %r15
+ movq 32(%rcx), %r12
+ movq 24(%rcx), %rbx
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rbp
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r13, %rbp
+ movq %rbp, 16(%rdi)
+ adcq %rsi, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r10, %r12
+ movq %r12, 32(%rdi)
+ adcq %r9, %r15
+ movq %r15, 40(%rdi)
+ adcq %r8, %r14
+ movq %r14, 48(%rdi)
+.LBB106_2: # %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end106:
+ .size mcl_fp_sub7L, .Lfunc_end106-mcl_fp_sub7L
+
+ .globl mcl_fp_subNF7L
+ .align 16, 0x90
+ .type mcl_fp_subNF7L,@function
+mcl_fp_subNF7L: # @mcl_fp_subNF7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 48(%rsi), %r12
+ movq 40(%rsi), %rax
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r10
+ movq 16(%rsi), %r11
+ movq (%rsi), %r14
+ movq 8(%rsi), %r15
+ subq (%rdx), %r14
+ sbbq 8(%rdx), %r15
+ sbbq 16(%rdx), %r11
+ sbbq 24(%rdx), %r10
+ sbbq 32(%rdx), %r9
+ sbbq 40(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ sbbq 48(%rdx), %r12
+ movq %r12, %rax
+ sarq $63, %rax
+ movq %rax, %rsi
+ shldq $1, %r12, %rsi
+ andq (%r8), %rsi
+ movq 48(%r8), %r13
+ andq %rax, %r13
+ movq 40(%r8), %rbx
+ andq %rax, %rbx
+ movq 32(%r8), %rdx
+ andq %rax, %rdx
+ movq 24(%r8), %rbp
+ andq %rax, %rbp
+ movq 16(%r8), %rcx
+ andq %rax, %rcx
+ andq 8(%r8), %rax
+ addq %r14, %rsi
+ adcq %r15, %rax
+ movq %rsi, (%rdi)
+ movq %rax, 8(%rdi)
+ adcq %r11, %rcx
+ movq %rcx, 16(%rdi)
+ adcq %r10, %rbp
+ movq %rbp, 24(%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 32(%rdi)
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 40(%rdi)
+ adcq %r12, %r13
+ movq %r13, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end107:
+ .size mcl_fp_subNF7L, .Lfunc_end107-mcl_fp_subNF7L
+
+ .globl mcl_fpDbl_add7L
+ .align 16, 0x90
+ .type mcl_fpDbl_add7L,@function
+mcl_fpDbl_add7L: # @mcl_fpDbl_add7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 104(%rdx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq 96(%rdx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 88(%rdx), %r11
+ movq 80(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r12
+ movq 16(%rdx), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %rbx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %r9
+ adcq 24(%rdx), %r15
+ adcq 32(%rdx), %r12
+ movq 72(%rdx), %r13
+ movq 64(%rdx), %rbp
+ movq %rax, (%rdi)
+ movq 56(%rdx), %r10
+ movq %rbx, 8(%rdi)
+ movq 48(%rdx), %rcx
+ movq 40(%rdx), %rdx
+ movq %r9, 16(%rdi)
+ movq 104(%rsi), %r9
+ movq %r15, 24(%rdi)
+ movq 40(%rsi), %rbx
+ adcq %rdx, %rbx
+ movq 96(%rsi), %r15
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %rdx
+ adcq %rcx, %rdx
+ movq 88(%rsi), %rax
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rcx
+ adcq %r10, %rcx
+ movq 80(%rsi), %r12
+ movq %rdx, 48(%rdi)
+ movq 72(%rsi), %rdx
+ movq 64(%rsi), %rsi
+ adcq %rbp, %rsi
+ adcq %r13, %rdx
+ adcq %r14, %r12
+ adcq %r11, %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ adcq -24(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, -24(%rsp) # 8-byte Spill
+ adcq -16(%rsp), %r9 # 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq %rcx, %rbx
+ subq (%r8), %rbx
+ movq %rsi, %r10
+ sbbq 8(%r8), %r10
+ movq %rdx, %r11
+ sbbq 16(%r8), %r11
+ movq %r12, %r14
+ sbbq 24(%r8), %r14
+ movq -8(%rsp), %r13 # 8-byte Reload
+ sbbq 32(%r8), %r13
+ sbbq 40(%r8), %r15
+ movq %r9, %rax
+ sbbq 48(%r8), %rax
+ sbbq $0, %rbp
+ andl $1, %ebp
+ cmovneq %rcx, %rbx
+ movq %rbx, 56(%rdi)
+ testb %bpl, %bpl
+ cmovneq %rsi, %r10
+ movq %r10, 64(%rdi)
+ cmovneq %rdx, %r11
+ movq %r11, 72(%rdi)
+ cmovneq %r12, %r14
+ movq %r14, 80(%rdi)
+ cmovneq -8(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, 88(%rdi)
+ cmovneq -24(%rsp), %r15 # 8-byte Folded Reload
+ movq %r15, 96(%rdi)
+ cmovneq %r9, %rax
+ movq %rax, 104(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end108:
+ .size mcl_fpDbl_add7L, .Lfunc_end108-mcl_fpDbl_add7L
+
+ .globl mcl_fpDbl_sub7L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub7L,@function
+mcl_fpDbl_sub7L: # @mcl_fpDbl_sub7L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 104(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 96(%rdx), %r10
+ movq 88(%rdx), %r14
+ movq 16(%rsi), %rax
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %ecx, %ecx
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %rax
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 80(%rdx), %r13
+ movq 72(%rdx), %rbp
+ movq %r15, (%rdi)
+ movq 64(%rdx), %r9
+ movq %r11, 8(%rdi)
+ movq 56(%rdx), %r15
+ movq %rax, 16(%rdi)
+ movq 48(%rdx), %r11
+ movq 40(%rdx), %rdx
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %rdx, %rbx
+ movq 104(%rsi), %rax
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %r12
+ sbbq %r11, %r12
+ movq 96(%rsi), %r11
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rdx
+ sbbq %r15, %rdx
+ movq 88(%rsi), %r15
+ movq %r12, 48(%rdi)
+ movq 64(%rsi), %rbx
+ sbbq %r9, %rbx
+ movq 80(%rsi), %r12
+ movq 72(%rsi), %r9
+ sbbq %rbp, %r9
+ sbbq %r13, %r12
+ sbbq %r14, %r15
+ sbbq %r10, %r11
+ sbbq -8(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%r8), %r10
+ cmoveq %rcx, %r10
+ testb %bpl, %bpl
+ movq 16(%r8), %rbp
+ cmoveq %rcx, %rbp
+ movq 8(%r8), %rsi
+ cmoveq %rcx, %rsi
+ movq 48(%r8), %r14
+ cmoveq %rcx, %r14
+ movq 40(%r8), %r13
+ cmoveq %rcx, %r13
+ movq 32(%r8), %rax
+ cmoveq %rcx, %rax
+ cmovneq 24(%r8), %rcx
+ addq %rdx, %r10
+ adcq %rbx, %rsi
+ movq %r10, 56(%rdi)
+ movq %rsi, 64(%rdi)
+ adcq %r9, %rbp
+ movq %rbp, 72(%rdi)
+ adcq %r12, %rcx
+ movq %rcx, 80(%rdi)
+ adcq %r15, %rax
+ movq %rax, 88(%rdi)
+ adcq %r11, %r13
+ movq %r13, 96(%rdi)
+ adcq -8(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, 104(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end109:
+ .size mcl_fpDbl_sub7L, .Lfunc_end109-mcl_fpDbl_sub7L
+
+ .align 16, 0x90
+ .type .LmulPv512x64,@function
+.LmulPv512x64: # @mulPv512x64
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ movq %rax, (%rdi)
+ movq %rcx, %rax
+ mulq 56(%rsi)
+ movq %rdx, %r10
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 48(%rsi)
+ movq %rdx, %r11
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rcx, %rax
+ mulq 40(%rsi)
+ movq %rdx, %r12
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %r13
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r9
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ addq -24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 8(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r8, %r9
+ movq %r9, 24(%rdi)
+ adcq %r13, %rbp
+ movq %rbp, 32(%rdi)
+ adcq %r15, %rbx
+ movq %rbx, 40(%rdi)
+ adcq -16(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, 48(%rdi)
+ adcq -8(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 56(%rdi)
+ adcq $0, %r10
+ movq %r10, 64(%rdi)
+ movq %rdi, %rax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end110:
+ .size .LmulPv512x64, .Lfunc_end110-.LmulPv512x64
+
+ .globl mcl_fp_mulUnitPre8L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre8L,@function
+mcl_fp_mulUnitPre8L: # @mcl_fp_mulUnitPre8L
+# BB#0:
+ pushq %rbx
+ subq $80, %rsp
+ movq %rdi, %rbx
+ leaq 8(%rsp), %rdi
+ callq .LmulPv512x64
+ movq 72(%rsp), %r8
+ movq 64(%rsp), %r9
+ movq 56(%rsp), %r10
+ movq 48(%rsp), %r11
+ movq 40(%rsp), %rdi
+ movq 32(%rsp), %rax
+ movq 24(%rsp), %rcx
+ movq 8(%rsp), %rdx
+ movq 16(%rsp), %rsi
+ movq %rdx, (%rbx)
+ movq %rsi, 8(%rbx)
+ movq %rcx, 16(%rbx)
+ movq %rax, 24(%rbx)
+ movq %rdi, 32(%rbx)
+ movq %r11, 40(%rbx)
+ movq %r10, 48(%rbx)
+ movq %r9, 56(%rbx)
+ movq %r8, 64(%rbx)
+ addq $80, %rsp
+ popq %rbx
+ retq
+.Lfunc_end111:
+ .size mcl_fp_mulUnitPre8L, .Lfunc_end111-mcl_fp_mulUnitPre8L
+
+ .globl mcl_fpDbl_mulPre8L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre8L,@function
+mcl_fpDbl_mulPre8L: # @mcl_fpDbl_mulPre8L
+# BB#0:
+ pushq %rbp
+ movq %rsp, %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $200, %rsp
+ movq %rdx, %rbx
+ movq %rsi, %r15
+ movq %rdi, %r14
+ callq mcl_fpDbl_mulPre4L@PLT
+ leaq 64(%r14), %rdi
+ leaq 32(%r15), %rsi
+ leaq 32(%rbx), %rdx
+ callq mcl_fpDbl_mulPre4L@PLT
+ movq 56(%rbx), %r10
+ movq 48(%rbx), %rcx
+ movq (%rbx), %rdx
+ movq 8(%rbx), %rsi
+ addq 32(%rbx), %rdx
+ adcq 40(%rbx), %rsi
+ adcq 16(%rbx), %rcx
+ adcq 24(%rbx), %r10
+ pushfq
+ popq %r8
+ xorl %r9d, %r9d
+ movq 56(%r15), %rdi
+ movq 48(%r15), %r13
+ movq (%r15), %r12
+ movq 8(%r15), %rbx
+ addq 32(%r15), %r12
+ adcq 40(%r15), %rbx
+ adcq 16(%r15), %r13
+ adcq 24(%r15), %rdi
+ movl $0, %eax
+ cmovbq %r10, %rax
+ movq %rax, -176(%rbp) # 8-byte Spill
+ movl $0, %eax
+ cmovbq %rcx, %rax
+ movq %rax, -184(%rbp) # 8-byte Spill
+ movl $0, %eax
+ cmovbq %rsi, %rax
+ movq %rax, -192(%rbp) # 8-byte Spill
+ movl $0, %eax
+ cmovbq %rdx, %rax
+ movq %rax, -200(%rbp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq %r12, -136(%rbp)
+ movq %rbx, -128(%rbp)
+ movq %r13, -120(%rbp)
+ movq %rdi, -112(%rbp)
+ movq %rdx, -168(%rbp)
+ movq %rsi, -160(%rbp)
+ movq %rcx, -152(%rbp)
+ movq %r10, -144(%rbp)
+ pushq %r8
+ popfq
+ cmovaeq %r9, %rdi
+ movq %rdi, -216(%rbp) # 8-byte Spill
+ cmovaeq %r9, %r13
+ cmovaeq %r9, %rbx
+ cmovaeq %r9, %r12
+ sbbq %rax, %rax
+ movq %rax, -208(%rbp) # 8-byte Spill
+ leaq -104(%rbp), %rdi
+ leaq -136(%rbp), %rsi
+ leaq -168(%rbp), %rdx
+ callq mcl_fpDbl_mulPre4L@PLT
+ addq -200(%rbp), %r12 # 8-byte Folded Reload
+ adcq -192(%rbp), %rbx # 8-byte Folded Reload
+ adcq -184(%rbp), %r13 # 8-byte Folded Reload
+ movq -216(%rbp), %r10 # 8-byte Reload
+ adcq -176(%rbp), %r10 # 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq -208(%rbp), %rdx # 8-byte Reload
+ andl %edx, %r15d
+ andl $1, %r15d
+ addq -72(%rbp), %r12
+ adcq -64(%rbp), %rbx
+ adcq -56(%rbp), %r13
+ adcq -48(%rbp), %r10
+ adcq %rax, %r15
+ movq -80(%rbp), %rax
+ movq -88(%rbp), %rcx
+ movq -104(%rbp), %rsi
+ movq -96(%rbp), %rdx
+ subq (%r14), %rsi
+ sbbq 8(%r14), %rdx
+ sbbq 16(%r14), %rcx
+ sbbq 24(%r14), %rax
+ movq 32(%r14), %rdi
+ movq %rdi, -184(%rbp) # 8-byte Spill
+ movq 40(%r14), %r8
+ movq %r8, -176(%rbp) # 8-byte Spill
+ sbbq %rdi, %r12
+ sbbq %r8, %rbx
+ movq 48(%r14), %rdi
+ movq %rdi, -192(%rbp) # 8-byte Spill
+ sbbq %rdi, %r13
+ movq 56(%r14), %rdi
+ movq %rdi, -200(%rbp) # 8-byte Spill
+ sbbq %rdi, %r10
+ sbbq $0, %r15
+ movq 64(%r14), %r11
+ subq %r11, %rsi
+ movq 72(%r14), %rdi
+ movq %rdi, -208(%rbp) # 8-byte Spill
+ sbbq %rdi, %rdx
+ movq 80(%r14), %rdi
+ movq %rdi, -216(%rbp) # 8-byte Spill
+ sbbq %rdi, %rcx
+ movq 88(%r14), %rdi
+ movq %rdi, -224(%rbp) # 8-byte Spill
+ sbbq %rdi, %rax
+ movq 96(%r14), %rdi
+ movq %rdi, -232(%rbp) # 8-byte Spill
+ sbbq %rdi, %r12
+ movq 104(%r14), %rdi
+ sbbq %rdi, %rbx
+ movq 112(%r14), %r8
+ sbbq %r8, %r13
+ movq 120(%r14), %r9
+ sbbq %r9, %r10
+ sbbq $0, %r15
+ addq -184(%rbp), %rsi # 8-byte Folded Reload
+ adcq -176(%rbp), %rdx # 8-byte Folded Reload
+ movq %rsi, 32(%r14)
+ adcq -192(%rbp), %rcx # 8-byte Folded Reload
+ movq %rdx, 40(%r14)
+ adcq -200(%rbp), %rax # 8-byte Folded Reload
+ movq %rcx, 48(%r14)
+ adcq %r11, %r12
+ movq %rax, 56(%r14)
+ movq %r12, 64(%r14)
+ adcq -208(%rbp), %rbx # 8-byte Folded Reload
+ movq %rbx, 72(%r14)
+ adcq -216(%rbp), %r13 # 8-byte Folded Reload
+ movq %r13, 80(%r14)
+ adcq -224(%rbp), %r10 # 8-byte Folded Reload
+ movq %r10, 88(%r14)
+ adcq -232(%rbp), %r15 # 8-byte Folded Reload
+ movq %r15, 96(%r14)
+ adcq $0, %rdi
+ movq %rdi, 104(%r14)
+ adcq $0, %r8
+ movq %r8, 112(%r14)
+ adcq $0, %r9
+ movq %r9, 120(%r14)
+ addq $200, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end112:
+ .size mcl_fpDbl_mulPre8L, .Lfunc_end112-mcl_fpDbl_mulPre8L
+
+ .globl mcl_fpDbl_sqrPre8L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre8L,@function
+mcl_fpDbl_sqrPre8L: # @mcl_fpDbl_sqrPre8L
+# BB#0:
+ pushq %rbp
+ movq %rsp, %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $200, %rsp
+ movq %rsi, %r14
+ movq %rdi, %rbx
+ movq %r14, %rdx
+ callq mcl_fpDbl_mulPre4L@PLT
+ leaq 64(%rbx), %rdi
+ leaq 32(%r14), %rsi
+ movq %rsi, %rdx
+ callq mcl_fpDbl_mulPre4L@PLT
+ movq (%r14), %r12
+ movq 8(%r14), %r15
+ addq 32(%r14), %r12
+ adcq 40(%r14), %r15
+ pushfq
+ popq %rax
+ movq %r12, -136(%rbp)
+ movq %r12, -168(%rbp)
+ addq %r12, %r12
+ movq %r15, -128(%rbp)
+ movq %r15, -160(%rbp)
+ adcq %r15, %r15
+ pushfq
+ popq %rcx
+ movq 56(%r14), %r13
+ movq 48(%r14), %rdx
+ pushq %rax
+ popfq
+ adcq 16(%r14), %rdx
+ adcq 24(%r14), %r13
+ pushfq
+ popq %r8
+ pushfq
+ popq %rsi
+ pushfq
+ popq %rdi
+ sbbq %rax, %rax
+ movq %rax, -184(%rbp) # 8-byte Spill
+ xorl %eax, %eax
+ pushq %rdi
+ popfq
+ cmovaeq %rax, %r15
+ movq %r15, -176(%rbp) # 8-byte Spill
+ cmovaeq %rax, %r12
+ movq %rdx, -120(%rbp)
+ movq %rdx, -152(%rbp)
+ movq %rdx, %r15
+ pushq %rcx
+ popfq
+ adcq %r15, %r15
+ movq %r13, %r14
+ movq %r13, -112(%rbp)
+ movq %r13, -144(%rbp)
+ adcq %r13, %r13
+ pushq %rsi
+ popfq
+ cmovaeq %rax, %r13
+ cmovaeq %rax, %r15
+ shrq $63, %r14
+ pushq %r8
+ popfq
+ cmovaeq %rax, %r14
+ leaq -104(%rbp), %rdi
+ leaq -136(%rbp), %rsi
+ leaq -168(%rbp), %rdx
+ callq mcl_fpDbl_mulPre4L@PLT
+ movq -184(%rbp), %rax # 8-byte Reload
+ andl $1, %eax
+ addq -72(%rbp), %r12
+ movq -176(%rbp), %r8 # 8-byte Reload
+ adcq -64(%rbp), %r8
+ adcq -56(%rbp), %r15
+ adcq -48(%rbp), %r13
+ adcq %r14, %rax
+ movq %rax, %rdi
+ movq -80(%rbp), %rax
+ movq -88(%rbp), %rcx
+ movq -104(%rbp), %rsi
+ movq -96(%rbp), %rdx
+ subq (%rbx), %rsi
+ sbbq 8(%rbx), %rdx
+ sbbq 16(%rbx), %rcx
+ sbbq 24(%rbx), %rax
+ movq 32(%rbx), %r10
+ movq %r10, -184(%rbp) # 8-byte Spill
+ movq 40(%rbx), %r9
+ movq %r9, -176(%rbp) # 8-byte Spill
+ sbbq %r10, %r12
+ sbbq %r9, %r8
+ movq %r8, %r10
+ movq 48(%rbx), %r8
+ movq %r8, -192(%rbp) # 8-byte Spill
+ sbbq %r8, %r15
+ movq 56(%rbx), %r8
+ movq %r8, -200(%rbp) # 8-byte Spill
+ sbbq %r8, %r13
+ sbbq $0, %rdi
+ movq 64(%rbx), %r11
+ subq %r11, %rsi
+ movq 72(%rbx), %r8
+ movq %r8, -208(%rbp) # 8-byte Spill
+ sbbq %r8, %rdx
+ movq 80(%rbx), %r8
+ movq %r8, -216(%rbp) # 8-byte Spill
+ sbbq %r8, %rcx
+ movq 88(%rbx), %r8
+ movq %r8, -224(%rbp) # 8-byte Spill
+ sbbq %r8, %rax
+ movq 96(%rbx), %r8
+ movq %r8, -232(%rbp) # 8-byte Spill
+ sbbq %r8, %r12
+ movq 104(%rbx), %r14
+ sbbq %r14, %r10
+ movq 112(%rbx), %r8
+ sbbq %r8, %r15
+ movq 120(%rbx), %r9
+ sbbq %r9, %r13
+ sbbq $0, %rdi
+ addq -184(%rbp), %rsi # 8-byte Folded Reload
+ adcq -176(%rbp), %rdx # 8-byte Folded Reload
+ movq %rsi, 32(%rbx)
+ adcq -192(%rbp), %rcx # 8-byte Folded Reload
+ movq %rdx, 40(%rbx)
+ adcq -200(%rbp), %rax # 8-byte Folded Reload
+ movq %rcx, 48(%rbx)
+ adcq %r11, %r12
+ movq %rax, 56(%rbx)
+ movq %r12, 64(%rbx)
+ adcq -208(%rbp), %r10 # 8-byte Folded Reload
+ movq %r10, 72(%rbx)
+ adcq -216(%rbp), %r15 # 8-byte Folded Reload
+ movq %r15, 80(%rbx)
+ adcq -224(%rbp), %r13 # 8-byte Folded Reload
+ movq %r13, 88(%rbx)
+ adcq -232(%rbp), %rdi # 8-byte Folded Reload
+ movq %rdi, 96(%rbx)
+ adcq $0, %r14
+ movq %r14, 104(%rbx)
+ adcq $0, %r8
+ movq %r8, 112(%rbx)
+ adcq $0, %r9
+ movq %r9, 120(%rbx)
+ addq $200, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end113:
+ .size mcl_fpDbl_sqrPre8L, .Lfunc_end113-mcl_fpDbl_sqrPre8L
+
+ .globl mcl_fp_mont8L
+ .align 16, 0x90
+ .type mcl_fp_mont8L,@function
+mcl_fp_mont8L: # @mcl_fp_mont8L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1256, %rsp # imm = 0x4E8
+ movq %rcx, %r13
+ movq %r13, 40(%rsp) # 8-byte Spill
+ movq %rdx, 16(%rsp) # 8-byte Spill
+ movq %rsi, 24(%rsp) # 8-byte Spill
+ movq %rdi, (%rsp) # 8-byte Spill
+ movq -8(%r13), %rbx
+ movq %rbx, 32(%rsp) # 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1184(%rsp), %rdi
+ callq .LmulPv512x64
+ movq 1184(%rsp), %r15
+ movq 1192(%rsp), %r14
+ movq %r15, %rdx
+ imulq %rbx, %rdx
+ movq 1248(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 1240(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 1232(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 1224(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 1216(%rsp), %r12
+ movq 1208(%rsp), %rbx
+ movq 1200(%rsp), %rbp
+ leaq 1112(%rsp), %rdi
+ movq %r13, %rsi
+ callq .LmulPv512x64
+ addq 1112(%rsp), %r15
+ adcq 1120(%rsp), %r14
+ adcq 1128(%rsp), %rbp
+ movq %rbp, 8(%rsp) # 8-byte Spill
+ adcq 1136(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ adcq 1144(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r13 # 8-byte Reload
+ adcq 1152(%rsp), %r13
+ movq 88(%rsp), %rbx # 8-byte Reload
+ adcq 1160(%rsp), %rbx
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 1168(%rsp), %rbp
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 1176(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1040(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %r15d
+ addq 1040(%rsp), %r14
+ movq 8(%rsp), %rax # 8-byte Reload
+ adcq 1048(%rsp), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 1056(%rsp), %rax
+ movq %rax, %r12
+ movq 64(%rsp), %rax # 8-byte Reload
+ adcq 1064(%rsp), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ adcq 1072(%rsp), %r13
+ movq %r13, 72(%rsp) # 8-byte Spill
+ adcq 1080(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ adcq 1088(%rsp), %rbp
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 1096(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ adcq 1104(%rsp), %r15
+ movq %r15, 56(%rsp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq %r14, %rdx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 968(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %r15d
+ addq 968(%rsp), %r14
+ movq 8(%rsp), %r13 # 8-byte Reload
+ adcq 976(%rsp), %r13
+ adcq 984(%rsp), %r12
+ movq %r12, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r14 # 8-byte Reload
+ adcq 992(%rsp), %r14
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 1000(%rsp), %rbx
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 1008(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ adcq 1016(%rsp), %rbp
+ movq %rbp, %r12
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 1024(%rsp), %rbp
+ movq 56(%rsp), %rax # 8-byte Reload
+ adcq 1032(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ adcq $0, %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 896(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %r13, %rcx
+ addq 896(%rsp), %rcx
+ movq 48(%rsp), %r13 # 8-byte Reload
+ adcq 904(%rsp), %r13
+ adcq 912(%rsp), %r14
+ adcq 920(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 928(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ adcq 936(%rsp), %r12
+ movq %r12, 80(%rsp) # 8-byte Spill
+ adcq 944(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r12 # 8-byte Reload
+ adcq 952(%rsp), %r12
+ adcq 960(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq %rcx, %rdx
+ movq %rcx, %rbp
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 824(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %ebx
+ addq 824(%rsp), %rbp
+ adcq 832(%rsp), %r13
+ movq %r13, 48(%rsp) # 8-byte Spill
+ adcq 840(%rsp), %r14
+ movq %r14, 64(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r13 # 8-byte Reload
+ adcq 848(%rsp), %r13
+ movq 88(%rsp), %rbp # 8-byte Reload
+ adcq 856(%rsp), %rbp
+ movq 80(%rsp), %r14 # 8-byte Reload
+ adcq 864(%rsp), %r14
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 872(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ adcq 880(%rsp), %r12
+ adcq 888(%rsp), %r15
+ adcq $0, %rbx
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 752(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 48(%rsp), %rax # 8-byte Reload
+ addq 752(%rsp), %rax
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 760(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 72(%rsp) # 8-byte Spill
+ adcq 776(%rsp), %rbp
+ movq %rbp, 88(%rsp) # 8-byte Spill
+ adcq 784(%rsp), %r14
+ movq %r14, 80(%rsp) # 8-byte Spill
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 792(%rsp), %rbp
+ adcq 800(%rsp), %r12
+ adcq 808(%rsp), %r15
+ adcq 816(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 680(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %r13, %rax
+ andl $1, %eax
+ addq 680(%rsp), %rbx
+ movq 64(%rsp), %r14 # 8-byte Reload
+ adcq 688(%rsp), %r14
+ movq 72(%rsp), %rcx # 8-byte Reload
+ adcq 696(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r13 # 8-byte Reload
+ adcq 704(%rsp), %r13
+ movq 80(%rsp), %rbx # 8-byte Reload
+ adcq 712(%rsp), %rbx
+ adcq 720(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq %r12, %rbp
+ adcq 728(%rsp), %rbp
+ adcq 736(%rsp), %r15
+ movq 48(%rsp), %r12 # 8-byte Reload
+ adcq 744(%rsp), %r12
+ adcq $0, %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 608(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %r14, %rax
+ addq 608(%rsp), %rax
+ movq 72(%rsp), %r14 # 8-byte Reload
+ adcq 616(%rsp), %r14
+ adcq 624(%rsp), %r13
+ movq %r13, 88(%rsp) # 8-byte Spill
+ adcq 632(%rsp), %rbx
+ movq %rbx, %r13
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 640(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ adcq 648(%rsp), %rbp
+ movq %rbp, 56(%rsp) # 8-byte Spill
+ adcq 656(%rsp), %r15
+ adcq 664(%rsp), %r12
+ movq %r12, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 672(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ sbbq %rbp, %rbp
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 536(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %rbp, %rax
+ andl $1, %eax
+ addq 536(%rsp), %rbx
+ adcq 544(%rsp), %r14
+ movq %r14, 72(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rbx # 8-byte Reload
+ adcq 552(%rsp), %rbx
+ adcq 560(%rsp), %r13
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 568(%rsp), %rbp
+ movq 56(%rsp), %r12 # 8-byte Reload
+ adcq 576(%rsp), %r12
+ adcq 584(%rsp), %r15
+ movq 48(%rsp), %rcx # 8-byte Reload
+ adcq 592(%rsp), %rcx
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r14 # 8-byte Reload
+ adcq 600(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 464(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 72(%rsp), %rax # 8-byte Reload
+ addq 464(%rsp), %rax
+ adcq 472(%rsp), %rbx
+ adcq 480(%rsp), %r13
+ movq %r13, 80(%rsp) # 8-byte Spill
+ adcq 488(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ adcq 496(%rsp), %r12
+ adcq 504(%rsp), %r15
+ movq %r15, 72(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r15 # 8-byte Reload
+ adcq 512(%rsp), %r15
+ adcq 520(%rsp), %r14
+ movq %r14, 64(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 528(%rsp), %r14
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %rbp
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 392(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq %r13, %rax
+ andl $1, %eax
+ addq 392(%rsp), %rbp
+ adcq 400(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 408(%rsp), %rbp
+ movq 96(%rsp), %rbx # 8-byte Reload
+ adcq 416(%rsp), %rbx
+ adcq 424(%rsp), %r12
+ movq 72(%rsp), %r13 # 8-byte Reload
+ adcq 432(%rsp), %r13
+ adcq 440(%rsp), %r15
+ movq %r15, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r15 # 8-byte Reload
+ adcq 448(%rsp), %r15
+ adcq 456(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 320(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 88(%rsp), %rax # 8-byte Reload
+ addq 320(%rsp), %rax
+ adcq 328(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 336(%rsp), %rbx
+ movq %rbx, 96(%rsp) # 8-byte Spill
+ movq %r12, %rbp
+ adcq 344(%rsp), %rbp
+ adcq 352(%rsp), %r13
+ movq 48(%rsp), %r12 # 8-byte Reload
+ adcq 360(%rsp), %r12
+ adcq 368(%rsp), %r15
+ movq %r15, 64(%rsp) # 8-byte Spill
+ adcq 376(%rsp), %r14
+ movq %r14, 88(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rcx # 8-byte Reload
+ adcq 384(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 248(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %r15d
+ addq 248(%rsp), %rbx
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 256(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 96(%rsp), %r14 # 8-byte Reload
+ adcq 264(%rsp), %r14
+ adcq 272(%rsp), %rbp
+ movq %rbp, 56(%rsp) # 8-byte Spill
+ movq %r13, %rbx
+ adcq 280(%rsp), %rbx
+ movq %r12, %rbp
+ adcq 288(%rsp), %rbp
+ movq 64(%rsp), %r13 # 8-byte Reload
+ adcq 296(%rsp), %r13
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 312(%rsp), %r12
+ adcq $0, %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 176(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 80(%rsp), %rax # 8-byte Reload
+ addq 176(%rsp), %rax
+ adcq 184(%rsp), %r14
+ movq %r14, 96(%rsp) # 8-byte Spill
+ movq 56(%rsp), %rcx # 8-byte Reload
+ adcq 192(%rsp), %rcx
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq 200(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 208(%rsp), %rbp
+ adcq 216(%rsp), %r13
+ movq %r13, 64(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 224(%rsp), %r14
+ adcq 232(%rsp), %r12
+ adcq 240(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq 32(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %r13
+ leaq 104(%rsp), %rdi
+ movq 40(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %ebx
+ addq 104(%rsp), %r13
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 112(%rsp), %rcx
+ movq 56(%rsp), %rdx # 8-byte Reload
+ adcq 120(%rsp), %rdx
+ movq 72(%rsp), %rsi # 8-byte Reload
+ adcq 128(%rsp), %rsi
+ movq %rbp, %rdi
+ adcq 136(%rsp), %rdi
+ movq %rdi, 48(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r8 # 8-byte Reload
+ adcq 144(%rsp), %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq %r14, %r9
+ adcq 152(%rsp), %r9
+ movq %r9, 88(%rsp) # 8-byte Spill
+ adcq 160(%rsp), %r12
+ adcq 168(%rsp), %r15
+ adcq $0, %rbx
+ movq %rcx, %rax
+ movq %rcx, %r11
+ movq 40(%rsp), %rbp # 8-byte Reload
+ subq (%rbp), %rax
+ movq %rdx, %rcx
+ movq %rdx, %r14
+ sbbq 8(%rbp), %rcx
+ movq %rsi, %rdx
+ movq %rsi, %r13
+ sbbq 16(%rbp), %rdx
+ movq %rdi, %rsi
+ sbbq 24(%rbp), %rsi
+ movq %r8, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %r9, %r10
+ sbbq 40(%rbp), %r10
+ movq %r12, %r8
+ sbbq 48(%rbp), %r8
+ movq %r15, %r9
+ sbbq 56(%rbp), %r9
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %r15, %r9
+ testb %bl, %bl
+ cmovneq %r11, %rax
+ movq (%rsp), %rbx # 8-byte Reload
+ movq %rax, (%rbx)
+ cmovneq %r14, %rcx
+ movq %rcx, 8(%rbx)
+ cmovneq %r13, %rdx
+ movq %rdx, 16(%rbx)
+ cmovneq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovneq 64(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rbx)
+ cmovneq 88(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 40(%rbx)
+ cmovneq %r12, %r8
+ movq %r8, 48(%rbx)
+ movq %r9, 56(%rbx)
+ addq $1256, %rsp # imm = 0x4E8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end114:
+ .size mcl_fp_mont8L, .Lfunc_end114-mcl_fp_mont8L
+
+ .globl mcl_fp_montNF8L
+ .align 16, 0x90
+ .type mcl_fp_montNF8L,@function
+mcl_fp_montNF8L: # @mcl_fp_montNF8L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1240, %rsp # imm = 0x4D8
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ movq %rdx, 16(%rsp) # 8-byte Spill
+ movq %rsi, 24(%rsp) # 8-byte Spill
+ movq %rdi, (%rsp) # 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 32(%rsp) # 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1168(%rsp), %rdi
+ callq .LmulPv512x64
+ movq 1168(%rsp), %r15
+ movq 1176(%rsp), %r12
+ movq %r15, %rdx
+ imulq %rbx, %rdx
+ movq 1232(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 1224(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 1216(%rsp), %r13
+ movq 1208(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 1200(%rsp), %r14
+ movq 1192(%rsp), %rbp
+ movq 1184(%rsp), %rbx
+ leaq 1096(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 1096(%rsp), %r15
+ adcq 1104(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq 1112(%rsp), %rbx
+ adcq 1120(%rsp), %rbp
+ adcq 1128(%rsp), %r14
+ movq %r14, %r12
+ movq 72(%rsp), %r14 # 8-byte Reload
+ adcq 1136(%rsp), %r14
+ adcq 1144(%rsp), %r13
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 1152(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 1160(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1024(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 1088(%rsp), %r15
+ movq 64(%rsp), %rax # 8-byte Reload
+ addq 1024(%rsp), %rax
+ adcq 1032(%rsp), %rbx
+ movq %rbx, 8(%rsp) # 8-byte Spill
+ movq %rbp, %rbx
+ adcq 1040(%rsp), %rbx
+ adcq 1048(%rsp), %r12
+ adcq 1056(%rsp), %r14
+ movq %r14, 72(%rsp) # 8-byte Spill
+ movq %r13, %rbp
+ adcq 1064(%rsp), %rbp
+ movq 80(%rsp), %rcx # 8-byte Reload
+ adcq 1072(%rsp), %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r14 # 8-byte Reload
+ adcq 1080(%rsp), %r14
+ adcq $0, %r15
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 952(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 952(%rsp), %r13
+ movq 8(%rsp), %rax # 8-byte Reload
+ adcq 960(%rsp), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ adcq 968(%rsp), %rbx
+ movq %rbx, 64(%rsp) # 8-byte Spill
+ movq %r12, %rbx
+ adcq 976(%rsp), %rbx
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 984(%rsp), %r12
+ adcq 992(%rsp), %rbp
+ movq %rbp, 40(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 1000(%rsp), %r13
+ movq %r14, %rbp
+ adcq 1008(%rsp), %rbp
+ adcq 1016(%rsp), %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 880(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 944(%rsp), %r14
+ movq 8(%rsp), %rax # 8-byte Reload
+ addq 880(%rsp), %rax
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 888(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ adcq 896(%rsp), %rbx
+ adcq 904(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq 40(%rsp), %rcx # 8-byte Reload
+ adcq 912(%rsp), %rcx
+ movq %rcx, 40(%rsp) # 8-byte Spill
+ adcq 920(%rsp), %r13
+ movq %r13, 80(%rsp) # 8-byte Spill
+ adcq 928(%rsp), %rbp
+ movq %rbp, 48(%rsp) # 8-byte Spill
+ adcq 936(%rsp), %r15
+ adcq $0, %r14
+ movq %rax, %rdx
+ movq %rax, %rbp
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 808(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 808(%rsp), %rbp
+ movq 64(%rsp), %r13 # 8-byte Reload
+ adcq 816(%rsp), %r13
+ movq %rbx, %r12
+ adcq 824(%rsp), %r12
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 832(%rsp), %rbx
+ movq 40(%rsp), %rbp # 8-byte Reload
+ adcq 840(%rsp), %rbp
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 848(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 856(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ adcq 864(%rsp), %r15
+ adcq 872(%rsp), %r14
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 736(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 800(%rsp), %rax
+ movq %r13, %rcx
+ addq 736(%rsp), %rcx
+ adcq 744(%rsp), %r12
+ movq %r12, 40(%rsp) # 8-byte Spill
+ adcq 752(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 760(%rsp), %rbp
+ movq %rbp, %r13
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 768(%rsp), %rbp
+ movq 48(%rsp), %rbx # 8-byte Reload
+ adcq 776(%rsp), %rbx
+ adcq 784(%rsp), %r15
+ adcq 792(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq %rcx, %rdx
+ movq %rcx, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 664(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 664(%rsp), %r12
+ movq 40(%rsp), %rax # 8-byte Reload
+ adcq 672(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rax # 8-byte Reload
+ adcq 680(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ adcq 688(%rsp), %r13
+ adcq 696(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 704(%rsp), %rbx
+ adcq 712(%rsp), %r15
+ adcq 720(%rsp), %r14
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 728(%rsp), %r12
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 592(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 656(%rsp), %rcx
+ movq 40(%rsp), %rax # 8-byte Reload
+ addq 592(%rsp), %rax
+ movq 72(%rsp), %rbp # 8-byte Reload
+ adcq 600(%rsp), %rbp
+ adcq 608(%rsp), %r13
+ movq %r13, 40(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 616(%rsp), %r13
+ adcq 624(%rsp), %rbx
+ adcq 632(%rsp), %r15
+ adcq 640(%rsp), %r14
+ adcq 648(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 520(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 520(%rsp), %r12
+ adcq 528(%rsp), %rbp
+ movq %rbp, 72(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r12 # 8-byte Reload
+ adcq 536(%rsp), %r12
+ movq %r13, %rbp
+ adcq 544(%rsp), %rbp
+ adcq 552(%rsp), %rbx
+ adcq 560(%rsp), %r15
+ adcq 568(%rsp), %r14
+ movq 64(%rsp), %r13 # 8-byte Reload
+ adcq 576(%rsp), %r13
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 584(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 448(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 512(%rsp), %rcx
+ movq 72(%rsp), %rax # 8-byte Reload
+ addq 448(%rsp), %rax
+ adcq 456(%rsp), %r12
+ movq %r12, 40(%rsp) # 8-byte Spill
+ adcq 464(%rsp), %rbp
+ adcq 472(%rsp), %rbx
+ adcq 480(%rsp), %r15
+ adcq 488(%rsp), %r14
+ adcq 496(%rsp), %r13
+ movq %r13, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 504(%rsp), %r13
+ adcq $0, %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 376(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 376(%rsp), %r12
+ movq 40(%rsp), %rax # 8-byte Reload
+ adcq 384(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ adcq 392(%rsp), %rbp
+ adcq 400(%rsp), %rbx
+ adcq 408(%rsp), %r15
+ adcq 416(%rsp), %r14
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 424(%rsp), %r12
+ adcq 432(%rsp), %r13
+ movq 72(%rsp), %rax # 8-byte Reload
+ adcq 440(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 304(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 368(%rsp), %rcx
+ movq 40(%rsp), %rax # 8-byte Reload
+ addq 304(%rsp), %rax
+ adcq 312(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 320(%rsp), %rbx
+ adcq 328(%rsp), %r15
+ adcq 336(%rsp), %r14
+ adcq 344(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq 352(%rsp), %r13
+ movq 72(%rsp), %rbp # 8-byte Reload
+ adcq 360(%rsp), %rbp
+ adcq $0, %rcx
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 232(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 232(%rsp), %r12
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 240(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ adcq 248(%rsp), %rbx
+ adcq 256(%rsp), %r15
+ adcq 264(%rsp), %r14
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 272(%rsp), %r12
+ adcq 280(%rsp), %r13
+ adcq 288(%rsp), %rbp
+ movq %rbp, 72(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rbp # 8-byte Reload
+ adcq 296(%rsp), %rbp
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 160(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ movq 224(%rsp), %rcx
+ movq 80(%rsp), %rax # 8-byte Reload
+ addq 160(%rsp), %rax
+ adcq 168(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ adcq 176(%rsp), %r15
+ adcq 184(%rsp), %r14
+ adcq 192(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq 200(%rsp), %r13
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 208(%rsp), %rbx
+ adcq 216(%rsp), %rbp
+ movq %rbp, %r12
+ adcq $0, %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbp
+ leaq 88(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 88(%rsp), %rbp
+ movq 48(%rsp), %r11 # 8-byte Reload
+ adcq 96(%rsp), %r11
+ adcq 104(%rsp), %r15
+ adcq 112(%rsp), %r14
+ movq 64(%rsp), %rsi # 8-byte Reload
+ adcq 120(%rsp), %rsi
+ movq %rsi, 64(%rsp) # 8-byte Spill
+ adcq 128(%rsp), %r13
+ adcq 136(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 144(%rsp), %r12
+ movq 80(%rsp), %r8 # 8-byte Reload
+ adcq 152(%rsp), %r8
+ movq %r11, %rax
+ movq 56(%rsp), %rbp # 8-byte Reload
+ subq (%rbp), %rax
+ movq %r15, %rcx
+ sbbq 8(%rbp), %rcx
+ movq %r14, %rdx
+ sbbq 16(%rbp), %rdx
+ sbbq 24(%rbp), %rsi
+ movq %r13, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %rbx, %r9
+ sbbq 40(%rbp), %r9
+ movq %r12, %r10
+ sbbq 48(%rbp), %r10
+ movq %rbp, %rbx
+ movq %r8, %rbp
+ sbbq 56(%rbx), %rbp
+ testq %rbp, %rbp
+ cmovsq %r11, %rax
+ movq (%rsp), %rbx # 8-byte Reload
+ movq %rax, (%rbx)
+ cmovsq %r15, %rcx
+ movq %rcx, 8(%rbx)
+ cmovsq %r14, %rdx
+ movq %rdx, 16(%rbx)
+ cmovsq 64(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovsq %r13, %rdi
+ movq %rdi, 32(%rbx)
+ cmovsq 72(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 40(%rbx)
+ cmovsq %r12, %r10
+ movq %r10, 48(%rbx)
+ cmovsq %r8, %rbp
+ movq %rbp, 56(%rbx)
+ addq $1240, %rsp # imm = 0x4D8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end115:
+ .size mcl_fp_montNF8L, .Lfunc_end115-mcl_fp_montNF8L
+
+ .globl mcl_fp_montRed8L
+ .align 16, 0x90
+ .type mcl_fp_montRed8L,@function
+mcl_fp_montRed8L: # @mcl_fp_montRed8L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $776, %rsp # imm = 0x308
+ movq %rdx, %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ movq %rdi, 72(%rsp) # 8-byte Spill
+ movq -8(%rax), %rcx
+ movq %rcx, 128(%rsp) # 8-byte Spill
+ movq (%rsi), %r15
+ movq 8(%rsi), %rdx
+ movq %rdx, 184(%rsp) # 8-byte Spill
+ movq %r15, %rdx
+ imulq %rcx, %rdx
+ movq 120(%rsi), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ movq 112(%rsi), %rcx
+ movq %rcx, 136(%rsp) # 8-byte Spill
+ movq 104(%rsi), %rcx
+ movq %rcx, 120(%rsp) # 8-byte Spill
+ movq 96(%rsi), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ movq 88(%rsi), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 80(%rsi), %rcx
+ movq %rcx, 160(%rsp) # 8-byte Spill
+ movq 72(%rsi), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ movq 64(%rsi), %r13
+ movq 56(%rsi), %rcx
+ movq %rcx, 144(%rsp) # 8-byte Spill
+ movq 48(%rsi), %r14
+ movq 40(%rsi), %rcx
+ movq %rcx, 152(%rsp) # 8-byte Spill
+ movq 32(%rsi), %r12
+ movq 24(%rsi), %rbx
+ movq 16(%rsi), %rbp
+ movq %rax, %rcx
+ movq (%rcx), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ movq 56(%rcx), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 48(%rcx), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq %rcx, %rsi
+ leaq 704(%rsp), %rdi
+ callq .LmulPv512x64
+ addq 704(%rsp), %r15
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 712(%rsp), %rcx
+ adcq 720(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 728(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ adcq 736(%rsp), %r12
+ movq %r12, 104(%rsp) # 8-byte Spill
+ movq 152(%rsp), %rax # 8-byte Reload
+ adcq 744(%rsp), %rax
+ movq %rax, 152(%rsp) # 8-byte Spill
+ adcq 752(%rsp), %r14
+ movq %r14, %r12
+ movq 144(%rsp), %rax # 8-byte Reload
+ adcq 760(%rsp), %rax
+ movq %rax, 144(%rsp) # 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 184(%rsp) # 8-byte Spill
+ adcq $0, 192(%rsp) # 8-byte Folded Spill
+ movq 160(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 176(%rsp) # 8-byte Folded Spill
+ adcq $0, 168(%rsp) # 8-byte Folded Spill
+ adcq $0, 120(%rsp) # 8-byte Folded Spill
+ movq 136(%rsp), %r13 # 8-byte Reload
+ adcq $0, %r13
+ movq 96(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ sbbq %rbx, %rbx
+ movq %rcx, %rbp
+ movq %rbp, %rdx
+ imulq 128(%rsp), %rdx # 8-byte Folded Reload
+ leaq 632(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 632(%rsp), %rbp
+ movq 80(%rsp), %rsi # 8-byte Reload
+ adcq 640(%rsp), %rsi
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 648(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rcx # 8-byte Reload
+ adcq 656(%rsp), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ movq 152(%rsp), %rcx # 8-byte Reload
+ adcq 664(%rsp), %rcx
+ movq %rcx, 152(%rsp) # 8-byte Spill
+ adcq 672(%rsp), %r12
+ movq 144(%rsp), %rcx # 8-byte Reload
+ adcq 680(%rsp), %rcx
+ movq %rcx, 144(%rsp) # 8-byte Spill
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 688(%rsp), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 696(%rsp), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ adcq $0, %r15
+ movq %r15, 160(%rsp) # 8-byte Spill
+ movq 176(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ movq 168(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 120(%rsp) # 8-byte Folded Spill
+ adcq $0, %r13
+ movq %r13, 136(%rsp) # 8-byte Spill
+ adcq $0, %r14
+ movq %r14, 96(%rsp) # 8-byte Spill
+ movq %rax, %rbp
+ adcq $0, %rbp
+ movq %rsi, %rdx
+ movq %rsi, %r14
+ imulq 128(%rsp), %rdx # 8-byte Folded Reload
+ leaq 560(%rsp), %rdi
+ movq 112(%rsp), %r13 # 8-byte Reload
+ movq %r13, %rsi
+ callq .LmulPv512x64
+ addq 560(%rsp), %r14
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 568(%rsp), %rcx
+ movq 104(%rsp), %rax # 8-byte Reload
+ adcq 576(%rsp), %rax
+ movq %rax, 104(%rsp) # 8-byte Spill
+ movq 152(%rsp), %rax # 8-byte Reload
+ adcq 584(%rsp), %rax
+ movq %rax, 152(%rsp) # 8-byte Spill
+ adcq 592(%rsp), %r12
+ movq %r12, 88(%rsp) # 8-byte Spill
+ movq 144(%rsp), %r14 # 8-byte Reload
+ adcq 600(%rsp), %r14
+ movq 184(%rsp), %rax # 8-byte Reload
+ adcq 608(%rsp), %rax
+ movq %rax, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rax # 8-byte Reload
+ adcq 616(%rsp), %rax
+ movq %rax, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rax # 8-byte Reload
+ adcq 624(%rsp), %rax
+ movq %rax, 160(%rsp) # 8-byte Spill
+ adcq $0, %rbx
+ movq %rbx, 176(%rsp) # 8-byte Spill
+ adcq $0, %r15
+ movq %r15, 168(%rsp) # 8-byte Spill
+ movq 120(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ movq 136(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 96(%rsp) # 8-byte Folded Spill
+ adcq $0, %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ movq %rcx, %rbp
+ movq %rbp, %rdx
+ movq 128(%rsp), %r12 # 8-byte Reload
+ imulq %r12, %rdx
+ leaq 488(%rsp), %rdi
+ movq %r13, %rsi
+ callq .LmulPv512x64
+ addq 488(%rsp), %rbp
+ movq 104(%rsp), %rax # 8-byte Reload
+ adcq 496(%rsp), %rax
+ movq 152(%rsp), %rbp # 8-byte Reload
+ adcq 504(%rsp), %rbp
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 512(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ adcq 520(%rsp), %r14
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 528(%rsp), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 536(%rsp), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %r13 # 8-byte Reload
+ adcq 544(%rsp), %r13
+ movq 176(%rsp), %rcx # 8-byte Reload
+ adcq 552(%rsp), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ adcq $0, 168(%rsp) # 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 120(%rsp) # 8-byte Spill
+ movq %r15, %rbx
+ adcq $0, %rbx
+ adcq $0, 96(%rsp) # 8-byte Folded Spill
+ adcq $0, 80(%rsp) # 8-byte Folded Spill
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq %r12, %rdx
+ leaq 416(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 416(%rsp), %r15
+ adcq 424(%rsp), %rbp
+ movq %rbp, %rax
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ movq %r14, %r12
+ adcq 440(%rsp), %r12
+ movq 184(%rsp), %r14 # 8-byte Reload
+ adcq 448(%rsp), %r14
+ movq 192(%rsp), %rbp # 8-byte Reload
+ adcq 456(%rsp), %rbp
+ adcq 464(%rsp), %r13
+ movq 176(%rsp), %rcx # 8-byte Reload
+ adcq 472(%rsp), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 168(%rsp), %rcx # 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ adcq $0, 120(%rsp) # 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 136(%rsp) # 8-byte Spill
+ movq 96(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 80(%rsp) # 8-byte Folded Spill
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 128(%rsp), %rdx # 8-byte Folded Reload
+ leaq 344(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 344(%rsp), %rbx
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 352(%rsp), %rax
+ adcq 360(%rsp), %r12
+ movq %r12, 144(%rsp) # 8-byte Spill
+ adcq 368(%rsp), %r14
+ movq %r14, 184(%rsp) # 8-byte Spill
+ adcq 376(%rsp), %rbp
+ movq %rbp, 192(%rsp) # 8-byte Spill
+ adcq 384(%rsp), %r13
+ movq %r13, 160(%rsp) # 8-byte Spill
+ movq 176(%rsp), %r13 # 8-byte Reload
+ adcq 392(%rsp), %r13
+ movq 168(%rsp), %r12 # 8-byte Reload
+ adcq 400(%rsp), %r12
+ movq 120(%rsp), %r14 # 8-byte Reload
+ adcq 408(%rsp), %r14
+ movq 136(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ movq %r15, %rbx
+ adcq $0, %rbx
+ adcq $0, 80(%rsp) # 8-byte Folded Spill
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq 128(%rsp), %rdx # 8-byte Folded Reload
+ leaq 272(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 272(%rsp), %r15
+ movq 144(%rsp), %rcx # 8-byte Reload
+ adcq 280(%rsp), %rcx
+ movq 184(%rsp), %rax # 8-byte Reload
+ adcq 288(%rsp), %rax
+ movq %rax, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rax # 8-byte Reload
+ adcq 296(%rsp), %rax
+ movq %rax, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rax # 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 160(%rsp) # 8-byte Spill
+ adcq 312(%rsp), %r13
+ movq %r13, 176(%rsp) # 8-byte Spill
+ adcq 320(%rsp), %r12
+ movq %r12, 168(%rsp) # 8-byte Spill
+ adcq 328(%rsp), %r14
+ movq %r14, %r13
+ adcq 336(%rsp), %rbp
+ movq %rbp, %r12
+ adcq $0, %rbx
+ movq %rbx, %r14
+ movq 80(%rsp), %r15 # 8-byte Reload
+ adcq $0, %r15
+ movq 128(%rsp), %rdx # 8-byte Reload
+ movq %rcx, %rbx
+ imulq %rbx, %rdx
+ leaq 200(%rsp), %rdi
+ movq 112(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv512x64
+ addq 200(%rsp), %rbx
+ movq 184(%rsp), %rax # 8-byte Reload
+ adcq 208(%rsp), %rax
+ movq %rax, 184(%rsp) # 8-byte Spill
+ movq 192(%rsp), %r8 # 8-byte Reload
+ adcq 216(%rsp), %r8
+ movq %r8, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rdx # 8-byte Reload
+ adcq 224(%rsp), %rdx
+ movq 176(%rsp), %rsi # 8-byte Reload
+ adcq 232(%rsp), %rsi
+ movq 168(%rsp), %rdi # 8-byte Reload
+ adcq 240(%rsp), %rdi
+ movq %r13, %rbp
+ adcq 248(%rsp), %rbp
+ movq %r12, %rbx
+ adcq 256(%rsp), %rbx
+ movq %rbx, 136(%rsp) # 8-byte Spill
+ movq %r14, %r9
+ adcq 264(%rsp), %r9
+ adcq $0, %r15
+ movq %r15, %r10
+ subq 16(%rsp), %rax # 8-byte Folded Reload
+ movq %r8, %rcx
+ sbbq 8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rdx, %r13
+ sbbq 24(%rsp), %r13 # 8-byte Folded Reload
+ movq %rsi, %r12
+ sbbq 32(%rsp), %r12 # 8-byte Folded Reload
+ movq %rdi, %r14
+ sbbq 40(%rsp), %r14 # 8-byte Folded Reload
+ movq %rbp, %r11
+ sbbq 48(%rsp), %r11 # 8-byte Folded Reload
+ movq %rbx, %r8
+ sbbq 56(%rsp), %r8 # 8-byte Folded Reload
+ movq %r9, %r15
+ sbbq 64(%rsp), %r9 # 8-byte Folded Reload
+ sbbq $0, %r10
+ andl $1, %r10d
+ cmovneq %r15, %r9
+ testb %r10b, %r10b
+ cmovneq 184(%rsp), %rax # 8-byte Folded Reload
+ movq 72(%rsp), %rbx # 8-byte Reload
+ movq %rax, (%rbx)
+ cmovneq 192(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 8(%rbx)
+ cmovneq %rdx, %r13
+ movq %r13, 16(%rbx)
+ cmovneq %rsi, %r12
+ movq %r12, 24(%rbx)
+ cmovneq %rdi, %r14
+ movq %r14, 32(%rbx)
+ cmovneq %rbp, %r11
+ movq %r11, 40(%rbx)
+ cmovneq 136(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, 48(%rbx)
+ movq %r9, 56(%rbx)
+ addq $776, %rsp # imm = 0x308
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end116:
+ .size mcl_fp_montRed8L, .Lfunc_end116-mcl_fp_montRed8L
+
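+# mcl_fp_addPre8L: plain 8-limb (512-bit) addition, no modular reduction.
+# Adds the eight words at (%rdx) to those at (%rsi), stores the sum at (%rdi),
+# and returns the carry-out (0 or 1) in %rax.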
+ .globl mcl_fp_addPre8L
+ .align 16, 0x90
+ .type mcl_fp_addPre8L,@function
+mcl_fp_addPre8L: # @mcl_fp_addPre8L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 56(%rsi), %r15
+ movq 48(%rdx), %r9
+ movq 48(%rsi), %r12
+ movq 40(%rdx), %r10
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %r14
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rsi
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r14, %rax
+ movq %rax, 24(%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r10, %r13
+ movq %r13, 40(%rdi)
+ adcq %r9, %r12
+ movq %r12, 48(%rdi)
+ adcq %r8, %r15
+ movq %r15, 56(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end117:
+ .size mcl_fp_addPre8L, .Lfunc_end117-mcl_fp_addPre8L
+
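+# mcl_fp_subPre8L: plain 8-limb subtraction, no modular reduction.
+# Subtracts the words at (%rdx) from those at (%rsi), stores the difference at
+# (%rdi), and returns the borrow-out (0 or 1) in %rax.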
+ .globl mcl_fp_subPre8L
+ .align 16, 0x90
+ .type mcl_fp_subPre8L,@function
+mcl_fp_subPre8L: # @mcl_fp_subPre8L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 56(%rsi), %r15
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %r12
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %r12
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq 48(%rsi), %r13
+ movq 40(%rsi), %rdx
+ movq 32(%rsi), %rbp
+ movq 24(%rsi), %rsi
+ movq %rbx, (%rdi)
+ movq %r12, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ sbbq %r14, %rbp
+ movq %rbp, 32(%rdi)
+ sbbq %r10, %rdx
+ movq %rdx, 40(%rdi)
+ sbbq %r9, %r13
+ movq %r13, 48(%rdi)
+ sbbq %r8, %r15
+ movq %r15, 56(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end118:
+ .size mcl_fp_subPre8L, .Lfunc_end118-mcl_fp_subPre8L
+
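+# mcl_fp_shr1_8L: right shift by one bit of the 8-limb value at (%rsi),
+# implemented as a shrdq chain; the shifted result is stored at (%rdi).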
+ .globl mcl_fp_shr1_8L
+ .align 16, 0x90
+ .type mcl_fp_shr1_8L,@function
+mcl_fp_shr1_8L: # @mcl_fp_shr1_8L
+# BB#0:
+ movq 56(%rsi), %r8
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r10
+ movq 32(%rsi), %r11
+ movq 24(%rsi), %rcx
+ movq 16(%rsi), %rdx
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rax
+ movq %rax, (%rdi)
+ shrdq $1, %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 16(%rdi)
+ shrdq $1, %r11, %rcx
+ movq %rcx, 24(%rdi)
+ shrdq $1, %r10, %r11
+ movq %r11, 32(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 40(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 48(%rdi)
+ shrq %r8
+ movq %r8, 56(%rdi)
+ retq
+.Lfunc_end119:
+ .size mcl_fp_shr1_8L, .Lfunc_end119-mcl_fp_shr1_8L
+
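+# mcl_fp_add8L: modular addition over an 8-limb field. The raw sum is stored
+# first; the modulus at (%rcx) is then subtracted and, when that subtraction
+# does not borrow, the reduced value overwrites the output at (%rdi).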
+ .globl mcl_fp_add8L
+ .align 16, 0x90
+ .type mcl_fp_add8L,@function
+mcl_fp_add8L: # @mcl_fp_add8L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r15
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r12
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %r11
+ movq 32(%rsi), %r10
+ movq (%rdx), %r14
+ movq 8(%rdx), %rbx
+ addq (%rsi), %r14
+ adcq 8(%rsi), %rbx
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r11
+ movq 40(%rdx), %rsi
+ adcq 32(%rdx), %r10
+ movq %r14, (%rdi)
+ movq %rbx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r10, 32(%rdi)
+ adcq %r13, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r12, %r9
+ movq %r9, 48(%rdi)
+ adcq %r15, %r8
+ movq %r8, 56(%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %r14
+ sbbq 8(%rcx), %rbx
+ sbbq 16(%rcx), %rax
+ sbbq 24(%rcx), %r11
+ sbbq 32(%rcx), %r10
+ sbbq 40(%rcx), %rsi
+ sbbq 48(%rcx), %r9
+ sbbq 56(%rcx), %r8
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne .LBB120_2
+# BB#1: # %nocarry
+ movq %r14, (%rdi)
+ movq %rbx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r10, 32(%rdi)
+ movq %rsi, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %r8, 56(%rdi)
+.LBB120_2: # %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end120:
+ .size mcl_fp_add8L, .Lfunc_end120-mcl_fp_add8L
+
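+# mcl_fp_addNF8L: branchless variant of the 8-limb modular addition ("NF"
+# presumably "no final carry"). It computes x+y and x+y-p and selects between
+# them with cmovs on the sign of the top word of the subtraction.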
+ .globl mcl_fp_addNF8L
+ .align 16, 0x90
+ .type mcl_fp_addNF8L,@function
+mcl_fp_addNF8L: # @mcl_fp_addNF8L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 48(%rdx), %rbp
+ movq 40(%rdx), %rbx
+ movq 32(%rdx), %rax
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r15
+ movq (%rdx), %r13
+ movq 8(%rdx), %r12
+ addq (%rsi), %r13
+ adcq 8(%rsi), %r12
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %r11
+ adcq 32(%rsi), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq %rax, %r10
+ adcq 40(%rsi), %rbx
+ movq %rbx, -16(%rsp) # 8-byte Spill
+ movq %rbx, %r9
+ adcq 48(%rsi), %rbp
+ movq %rbp, -8(%rsp) # 8-byte Spill
+ movq %rbp, %rax
+ adcq 56(%rsi), %r8
+ movq %r13, %rsi
+ subq (%rcx), %rsi
+ movq %r12, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r15, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r11, %r14
+ sbbq 24(%rcx), %r14
+ movq %r10, %rbp
+ sbbq 32(%rcx), %rbp
+ movq %r9, %r10
+ sbbq 40(%rcx), %r10
+ movq %rax, %r9
+ sbbq 48(%rcx), %r9
+ movq %r8, %rax
+ sbbq 56(%rcx), %rax
+ testq %rax, %rax
+ cmovsq %r13, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r12, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r15, %rbx
+ movq %rbx, 16(%rdi)
+ cmovsq %r11, %r14
+ movq %r14, 24(%rdi)
+ cmovsq -24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 32(%rdi)
+ cmovsq -16(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 40(%rdi)
+ cmovsq -8(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end121:
+ .size mcl_fp_addNF8L, .Lfunc_end121-mcl_fp_addNF8L
+
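+# mcl_fp_sub8L: modular subtraction over an 8-limb field. The raw difference
+# is stored first; if the subtraction borrowed, the modulus at (%rcx) is added
+# back into the output at (%rdi).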
+ .globl mcl_fp_sub8L
+ .align 16, 0x90
+ .type mcl_fp_sub8L,@function
+mcl_fp_sub8L: # @mcl_fp_sub8L
+# BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r12
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r13
+ movq (%rsi), %rax
+ movq 8(%rsi), %r10
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r10
+ movq 16(%rsi), %r11
+ sbbq 16(%rdx), %r11
+ movq 24(%rsi), %r15
+ sbbq 24(%rdx), %r15
+ movq 32(%rsi), %r14
+ sbbq 32(%rdx), %r14
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %rsi
+ sbbq 40(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r10, 8(%rdi)
+ movq %r11, 16(%rdi)
+ movq %r15, 24(%rdi)
+ movq %r14, 32(%rdi)
+ movq %rsi, 40(%rdi)
+ sbbq %r13, %r9
+ movq %r9, 48(%rdi)
+ sbbq %r12, %r8
+ movq %r8, 56(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB122_2
+# BB#1: # %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ movq 8(%rcx), %rax
+ adcq %r10, %rax
+ movq %rax, 8(%rdi)
+ movq 16(%rcx), %rax
+ adcq %r11, %rax
+ movq %rax, 16(%rdi)
+ movq 24(%rcx), %rax
+ adcq %r15, %rax
+ movq %rax, 24(%rdi)
+ movq 32(%rcx), %rax
+ adcq %r14, %rax
+ movq %rax, 32(%rdi)
+ movq 40(%rcx), %rax
+ adcq %rsi, %rax
+ movq %rax, 40(%rdi)
+ movq 48(%rcx), %rax
+ adcq %r9, %rax
+ movq %rax, 48(%rdi)
+ movq 56(%rcx), %rax
+ adcq %r8, %rax
+ movq %rax, 56(%rdi)
+.LBB122_2: # %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end122:
+ .size mcl_fp_sub8L, .Lfunc_end122-mcl_fp_sub8L
+
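+# mcl_fp_subNF8L: branchless 8-limb modular subtraction. The sign of the raw
+# difference (sarq $63) builds a mask that selects either 0 or the modulus,
+# which is then added back in a single pass.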
+ .globl mcl_fp_subNF8L
+ .align 16, 0x90
+ .type mcl_fp_subNF8L,@function
+mcl_fp_subNF8L: # @mcl_fp_subNF8L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq %rdi, %r9
+ movq 56(%rsi), %r14
+ movq 48(%rsi), %rax
+ movq 40(%rsi), %rcx
+ movq 32(%rsi), %rdi
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r15
+ movq (%rsi), %r13
+ movq 8(%rsi), %r12
+ subq (%rdx), %r13
+ sbbq 8(%rdx), %r12
+ sbbq 16(%rdx), %r15
+ sbbq 24(%rdx), %r11
+ sbbq 32(%rdx), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ sbbq 40(%rdx), %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ sbbq 48(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ sbbq 56(%rdx), %r14
+ movq %r14, %rsi
+ sarq $63, %rsi
+ movq 56(%r8), %r10
+ andq %rsi, %r10
+ movq 48(%r8), %rbx
+ andq %rsi, %rbx
+ movq 40(%r8), %rdi
+ andq %rsi, %rdi
+ movq 32(%r8), %rbp
+ andq %rsi, %rbp
+ movq 24(%r8), %rdx
+ andq %rsi, %rdx
+ movq 16(%r8), %rcx
+ andq %rsi, %rcx
+ movq 8(%r8), %rax
+ andq %rsi, %rax
+ andq (%r8), %rsi
+ addq %r13, %rsi
+ adcq %r12, %rax
+ movq %rsi, (%r9)
+ adcq %r15, %rcx
+ movq %rax, 8(%r9)
+ movq %rcx, 16(%r9)
+ adcq %r11, %rdx
+ movq %rdx, 24(%r9)
+ adcq -24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 32(%r9)
+ adcq -16(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 40(%r9)
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 48(%r9)
+ adcq %r14, %r10
+ movq %r10, 56(%r9)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end123:
+ .size mcl_fp_subNF8L, .Lfunc_end123-mcl_fp_subNF8L
+
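+# mcl_fpDbl_add8L: addition of two 16-limb (double-width) values. The low
+# eight limbs are stored as-is; the high eight limbs are conditionally reduced
+# by the modulus at (%rcx) before being written to 64(%rdi)..120(%rdi).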
+ .globl mcl_fpDbl_add8L
+ .align 16, 0x90
+ .type mcl_fpDbl_add8L,@function
+mcl_fpDbl_add8L: # @mcl_fpDbl_add8L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 120(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 112(%rdx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 104(%rdx), %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 96(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r11
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %r15
+ adcq 32(%rdx), %r11
+ movq 88(%rdx), %rbp
+ movq 80(%rdx), %r13
+ movq %rbx, (%rdi)
+ movq 72(%rdx), %r10
+ movq %rax, 8(%rdi)
+ movq 64(%rdx), %r9
+ movq %r12, 16(%rdi)
+ movq 40(%rdx), %r12
+ movq %r15, 24(%rdi)
+ movq 40(%rsi), %rbx
+ adcq %r12, %rbx
+ movq 56(%rdx), %r15
+ movq 48(%rdx), %r12
+ movq %r11, 32(%rdi)
+ movq 48(%rsi), %rdx
+ adcq %r12, %rdx
+ movq 120(%rsi), %r12
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rax
+ adcq %r15, %rax
+ movq 112(%rsi), %rcx
+ movq %rdx, 48(%rdi)
+ movq 64(%rsi), %rbx
+ adcq %r9, %rbx
+ movq 104(%rsi), %rdx
+ movq %rax, 56(%rdi)
+ movq 72(%rsi), %r9
+ adcq %r10, %r9
+ movq 80(%rsi), %r11
+ adcq %r13, %r11
+ movq 96(%rsi), %rax
+ movq 88(%rsi), %r15
+ adcq %rbp, %r15
+ adcq %r14, %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rdx, %rax
+ adcq -32(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -32(%rsp) # 8-byte Spill
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ adcq -8(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, -8(%rsp) # 8-byte Spill
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq %rbx, %rsi
+ subq (%r8), %rsi
+ movq %r9, %rdx
+ sbbq 8(%r8), %rdx
+ movq %r11, %r10
+ sbbq 16(%r8), %r10
+ movq %r15, %r14
+ sbbq 24(%r8), %r14
+ movq -16(%rsp), %r13 # 8-byte Reload
+ sbbq 32(%r8), %r13
+ movq %rax, %r12
+ sbbq 40(%r8), %r12
+ movq %rcx, %rax
+ sbbq 48(%r8), %rax
+ movq -8(%rsp), %rcx # 8-byte Reload
+ sbbq 56(%r8), %rcx
+ sbbq $0, %rbp
+ andl $1, %ebp
+ cmovneq %rbx, %rsi
+ movq %rsi, 64(%rdi)
+ testb %bpl, %bpl
+ cmovneq %r9, %rdx
+ movq %rdx, 72(%rdi)
+ cmovneq %r11, %r10
+ movq %r10, 80(%rdi)
+ cmovneq %r15, %r14
+ movq %r14, 88(%rdi)
+ cmovneq -16(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, 96(%rdi)
+ cmovneq -32(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, 104(%rdi)
+ cmovneq -24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 112(%rdi)
+ cmovneq -8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 120(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end124:
+ .size mcl_fpDbl_add8L, .Lfunc_end124-mcl_fpDbl_add8L
+
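+# mcl_fpDbl_sub8L: subtraction of two 16-limb values. The low eight limbs are
+# stored directly; if the overall subtraction borrowed, the modulus is added
+# back into the high eight limbs before they are written out.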
+ .globl mcl_fpDbl_sub8L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub8L,@function
+mcl_fpDbl_sub8L: # @mcl_fpDbl_sub8L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r15
+ movq 120(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 112(%rdx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq 104(%rdx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r9
+ movq (%rsi), %r12
+ movq 8(%rsi), %r14
+ xorl %r8d, %r8d
+ subq (%rdx), %r12
+ sbbq 8(%rdx), %r14
+ sbbq 16(%rdx), %r9
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r13
+ sbbq 32(%rdx), %r13
+ movq 96(%rdx), %rbp
+ movq 88(%rdx), %r11
+ movq %r12, (%rdi)
+ movq 80(%rdx), %r12
+ movq %r14, 8(%rdi)
+ movq 72(%rdx), %r10
+ movq %r9, 16(%rdi)
+ movq 40(%rdx), %r9
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %r9, %rbx
+ movq 48(%rdx), %r9
+ movq %r13, 32(%rdi)
+ movq 48(%rsi), %r14
+ sbbq %r9, %r14
+ movq 64(%rdx), %r13
+ movq 56(%rdx), %r9
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rdx
+ sbbq %r9, %rdx
+ movq 120(%rsi), %rcx
+ movq %r14, 48(%rdi)
+ movq 64(%rsi), %rbx
+ sbbq %r13, %rbx
+ movq 112(%rsi), %rax
+ movq %rdx, 56(%rdi)
+ movq 72(%rsi), %r9
+ sbbq %r10, %r9
+ movq 80(%rsi), %r13
+ sbbq %r12, %r13
+ movq 88(%rsi), %r12
+ sbbq %r11, %r12
+ movq 104(%rsi), %rdx
+ movq 96(%rsi), %r14
+ sbbq %rbp, %r14
+ sbbq -24(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -24(%rsp) # 8-byte Spill
+ sbbq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -16(%rsp) # 8-byte Spill
+ sbbq -8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -8(%rsp) # 8-byte Spill
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%r15), %r11
+ cmoveq %r8, %r11
+ testb %bpl, %bpl
+ movq 16(%r15), %rbp
+ cmoveq %r8, %rbp
+ movq 8(%r15), %rsi
+ cmoveq %r8, %rsi
+ movq 56(%r15), %r10
+ cmoveq %r8, %r10
+ movq 48(%r15), %rdx
+ cmoveq %r8, %rdx
+ movq 40(%r15), %rcx
+ cmoveq %r8, %rcx
+ movq 32(%r15), %rax
+ cmoveq %r8, %rax
+ cmovneq 24(%r15), %r8
+ addq %rbx, %r11
+ adcq %r9, %rsi
+ movq %r11, 64(%rdi)
+ adcq %r13, %rbp
+ movq %rsi, 72(%rdi)
+ movq %rbp, 80(%rdi)
+ adcq %r12, %r8
+ movq %r8, 88(%rdi)
+ adcq %r14, %rax
+ movq %rax, 96(%rdi)
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 104(%rdi)
+ adcq -16(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 112(%rdi)
+ adcq -8(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 120(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end125:
+ .size mcl_fpDbl_sub8L, .Lfunc_end125-mcl_fpDbl_sub8L
+
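+# .LmulPv576x64 (local helper): multiplies the 9-limb (576-bit) value at
+# (%rsi) by the 64-bit scalar in %rdx and writes the 10-limb product to
+# (%rdi); the output pointer is returned in %rax.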
+ .align 16, 0x90
+ .type .LmulPv576x64,@function
+.LmulPv576x64: # @mulPv576x64
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rbx
+ movq %rbx, %rax
+ mulq (%rsi)
+ movq %rdx, -32(%rsp) # 8-byte Spill
+ movq %rax, (%rdi)
+ movq %rbx, %rax
+ mulq 64(%rsi)
+ movq %rdx, %r10
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 56(%rsi)
+ movq %rdx, %r14
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 48(%rsi)
+ movq %rdx, %r12
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 40(%rsi)
+ movq %rdx, %rcx
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq %rbx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r9
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r15
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq 8(%rsi)
+ addq -32(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 8(%rdi)
+ adcq %r13, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r11, %r15
+ movq %r15, 24(%rdi)
+ adcq %r8, %r9
+ movq %r9, 32(%rdi)
+ adcq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 40(%rdi)
+ adcq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 48(%rdi)
+ adcq -16(%rsp), %r12 # 8-byte Folded Reload
+ movq %r12, 56(%rdi)
+ adcq -8(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, 64(%rdi)
+ adcq $0, %r10
+ movq %r10, 72(%rdi)
+ movq %rdi, %rax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end126:
+ .size .LmulPv576x64, .Lfunc_end126-.LmulPv576x64
+
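+# mcl_fp_mulUnitPre9L: thin wrapper around .LmulPv576x64 that multiplies a
+# 9-limb operand by a 64-bit unit into a stack buffer, then copies the ten
+# result words out to the saved destination pointer in %rbx.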
+ .globl mcl_fp_mulUnitPre9L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre9L,@function
+mcl_fp_mulUnitPre9L: # @mcl_fp_mulUnitPre9L
+# BB#0:
+ pushq %r14
+ pushq %rbx
+ subq $88, %rsp
+ movq %rdi, %rbx
+ leaq 8(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 80(%rsp), %r8
+ movq 72(%rsp), %r9
+ movq 64(%rsp), %r10
+ movq 56(%rsp), %r11
+ movq 48(%rsp), %r14
+ movq 40(%rsp), %rax
+ movq 32(%rsp), %rcx
+ movq 24(%rsp), %rdx
+ movq 8(%rsp), %rsi
+ movq 16(%rsp), %rdi
+ movq %rsi, (%rbx)
+ movq %rdi, 8(%rbx)
+ movq %rdx, 16(%rbx)
+ movq %rcx, 24(%rbx)
+ movq %rax, 32(%rbx)
+ movq %r14, 40(%rbx)
+ movq %r11, 48(%rbx)
+ movq %r10, 56(%rbx)
+ movq %r9, 64(%rbx)
+ movq %r8, 72(%rbx)
+ addq $88, %rsp
+ popq %rbx
+ popq %r14
+ retq
+.Lfunc_end127:
+ .size mcl_fp_mulUnitPre9L, .Lfunc_end127-mcl_fp_mulUnitPre9L
+
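+# mcl_fpDbl_mulPre9L: schoolbook 9x9-limb multiplication. Each of the nine
+# words of the second operand is multiplied into the first via .LmulPv576x64
+# and the partial products are accumulated into the 18-limb result.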
+ .globl mcl_fpDbl_mulPre9L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre9L,@function
+mcl_fpDbl_mulPre9L: # @mcl_fpDbl_mulPre9L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $808, %rsp # imm = 0x328
+ movq %rdx, %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq %rsi, 72(%rsp) # 8-byte Spill
+ movq %rdi, %r12
+ movq %r12, 80(%rsp) # 8-byte Spill
+ movq (%rax), %rdx
+ movq %rax, %rbx
+ leaq 728(%rsp), %rdi
+ movq %rsi, %rbp
+ callq .LmulPv576x64
+ movq 800(%rsp), %r13
+ movq 792(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 784(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 776(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 768(%rsp), %rax
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq 760(%rsp), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ movq 752(%rsp), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ movq 744(%rsp), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq 728(%rsp), %rax
+ movq 736(%rsp), %r14
+ movq %rax, (%r12)
+ movq 8(%rbx), %rdx
+ leaq 648(%rsp), %rdi
+ movq %rbp, %rsi
+ callq .LmulPv576x64
+ movq 720(%rsp), %r8
+ movq 712(%rsp), %rcx
+ movq 704(%rsp), %rdx
+ movq 696(%rsp), %rsi
+ movq 688(%rsp), %rdi
+ movq 680(%rsp), %rbp
+ addq 648(%rsp), %r14
+ movq 672(%rsp), %rax
+ movq 656(%rsp), %rbx
+ movq 664(%rsp), %r15
+ movq %r14, 8(%r12)
+ adcq 8(%rsp), %rbx # 8-byte Folded Reload
+ adcq 16(%rsp), %r15 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, %r14
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 24(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 32(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 40(%rsp) # 8-byte Spill
+ adcq %r13, %rcx
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r13 # 8-byte Reload
+ movq 16(%r13), %rdx
+ leaq 568(%rsp), %rdi
+ movq 72(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 640(%rsp), %r8
+ movq 632(%rsp), %r9
+ movq 624(%rsp), %r10
+ movq 616(%rsp), %rdi
+ movq 608(%rsp), %rbp
+ movq 600(%rsp), %rcx
+ addq 568(%rsp), %rbx
+ movq 592(%rsp), %rdx
+ movq 576(%rsp), %r12
+ movq 584(%rsp), %rsi
+ movq 80(%rsp), %rax # 8-byte Reload
+ movq %rbx, 16(%rax)
+ adcq %r15, %r12
+ adcq %r14, %rsi
+ movq %rsi, (%rsp) # 8-byte Spill
+ adcq 16(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 24(%r13), %rdx
+ leaq 488(%rsp), %rdi
+ movq 72(%rsp), %r15 # 8-byte Reload
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 560(%rsp), %r8
+ movq 552(%rsp), %rcx
+ movq 544(%rsp), %rdx
+ movq 536(%rsp), %rsi
+ movq 528(%rsp), %rdi
+ movq 520(%rsp), %rbp
+ addq 488(%rsp), %r12
+ movq 512(%rsp), %rax
+ movq 496(%rsp), %rbx
+ movq 504(%rsp), %r13
+ movq 80(%rsp), %r14 # 8-byte Reload
+ movq %r12, 24(%r14)
+ adcq (%rsp), %rbx # 8-byte Folded Reload
+ adcq 8(%rsp), %r13 # 8-byte Folded Reload
+ adcq 16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r12 # 8-byte Reload
+ movq 32(%r12), %rdx
+ leaq 408(%rsp), %rdi
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 480(%rsp), %r8
+ movq 472(%rsp), %r9
+ movq 464(%rsp), %rdx
+ movq 456(%rsp), %rsi
+ movq 448(%rsp), %rdi
+ movq 440(%rsp), %rbp
+ addq 408(%rsp), %rbx
+ movq 432(%rsp), %rax
+ movq 416(%rsp), %r15
+ movq 424(%rsp), %rcx
+ movq %rbx, 32(%r14)
+ adcq %r13, %r15
+ adcq 8(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, (%rsp) # 8-byte Spill
+ adcq 16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq %r12, %r14
+ movq 40(%r14), %rdx
+ leaq 328(%rsp), %rdi
+ movq 72(%rsp), %r13 # 8-byte Reload
+ movq %r13, %rsi
+ callq .LmulPv576x64
+ movq 400(%rsp), %r8
+ movq 392(%rsp), %r9
+ movq 384(%rsp), %rsi
+ movq 376(%rsp), %rdi
+ movq 368(%rsp), %rbx
+ movq 360(%rsp), %rbp
+ addq 328(%rsp), %r15
+ movq 352(%rsp), %rcx
+ movq 336(%rsp), %r12
+ movq 344(%rsp), %rdx
+ movq 80(%rsp), %rax # 8-byte Reload
+ movq %r15, 40(%rax)
+ adcq (%rsp), %r12 # 8-byte Folded Reload
+ adcq 8(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, (%rsp) # 8-byte Spill
+ adcq 16(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 48(%r14), %rdx
+ leaq 248(%rsp), %rdi
+ movq %r13, %rsi
+ movq %r13, %r15
+ callq .LmulPv576x64
+ movq 320(%rsp), %r8
+ movq 312(%rsp), %r9
+ movq 304(%rsp), %rsi
+ movq 296(%rsp), %rdi
+ movq 288(%rsp), %rbx
+ movq 280(%rsp), %rbp
+ addq 248(%rsp), %r12
+ movq 272(%rsp), %rcx
+ movq 256(%rsp), %r13
+ movq 264(%rsp), %rdx
+ movq 80(%rsp), %rax # 8-byte Reload
+ movq %r12, 48(%rax)
+ adcq (%rsp), %r13 # 8-byte Folded Reload
+ adcq 8(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, (%rsp) # 8-byte Spill
+ adcq 16(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 8(%rsp) # 8-byte Spill
+ adcq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq 56(%r14), %rdx
+ leaq 168(%rsp), %rdi
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 240(%rsp), %rcx
+ movq 232(%rsp), %rdx
+ movq 224(%rsp), %rsi
+ movq 216(%rsp), %rdi
+ movq 208(%rsp), %rbx
+ addq 168(%rsp), %r13
+ movq 200(%rsp), %r12
+ movq 192(%rsp), %rbp
+ movq 176(%rsp), %r14
+ movq 184(%rsp), %r15
+ movq 80(%rsp), %rax # 8-byte Reload
+ movq %r13, 56(%rax)
+ adcq (%rsp), %r14 # 8-byte Folded Reload
+ adcq 8(%rsp), %r15 # 8-byte Folded Reload
+ adcq 16(%rsp), %rbp # 8-byte Folded Reload
+ adcq 24(%rsp), %r12 # 8-byte Folded Reload
+ adcq 32(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %r13
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ movq 64(%rsp), %rax # 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 88(%rsp), %rdi
+ movq 72(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 88(%rsp), %r14
+ adcq 96(%rsp), %r15
+ movq 160(%rsp), %r8
+ adcq 104(%rsp), %rbp
+ movq 152(%rsp), %r9
+ movq 144(%rsp), %rdx
+ movq 136(%rsp), %rsi
+ movq 128(%rsp), %rdi
+ movq 120(%rsp), %rbx
+ movq 112(%rsp), %rax
+ movq 80(%rsp), %rcx # 8-byte Reload
+ movq %r14, 64(%rcx)
+ movq %r15, 72(%rcx)
+ adcq %r12, %rax
+ movq %rbp, 80(%rcx)
+ movq %rax, 88(%rcx)
+ adcq %r13, %rbx
+ movq %rbx, 96(%rcx)
+ adcq 32(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 104(%rcx)
+ adcq 40(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 112(%rcx)
+ adcq 48(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 120(%rcx)
+ adcq 56(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 128(%rcx)
+ adcq $0, %r8
+ movq %r8, 136(%rcx)
+ addq $808, %rsp # imm = 0x328
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end128:
+ .size mcl_fpDbl_mulPre9L, .Lfunc_end128-mcl_fpDbl_mulPre9L
+
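+# mcl_fpDbl_sqrPre9L: 9-limb squaring, computed as a full schoolbook multiply
+# of the operand by itself using the same row-by-row .LmulPv576x64 helper
+# (no dedicated squaring shortcut in this generated code).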
+ .globl mcl_fpDbl_sqrPre9L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre9L,@function
+mcl_fpDbl_sqrPre9L: # @mcl_fpDbl_sqrPre9L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $808, %rsp # imm = 0x328
+ movq %rsi, %r15
+ movq %r15, 80(%rsp) # 8-byte Spill
+ movq %rdi, %r14
+ movq %r14, 72(%rsp) # 8-byte Spill
+ movq (%r15), %rdx
+ leaq 728(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 800(%rsp), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 792(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 784(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 776(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 768(%rsp), %rax
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq 760(%rsp), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ movq 752(%rsp), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ movq 744(%rsp), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq 728(%rsp), %rax
+ movq 736(%rsp), %r12
+ movq %rax, (%r14)
+ movq 8(%r15), %rdx
+ leaq 648(%rsp), %rdi
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 720(%rsp), %r8
+ movq 712(%rsp), %rcx
+ movq 704(%rsp), %rdx
+ movq 696(%rsp), %rsi
+ movq 688(%rsp), %rdi
+ movq 680(%rsp), %rbp
+ addq 648(%rsp), %r12
+ movq 672(%rsp), %rax
+ movq 656(%rsp), %rbx
+ movq 664(%rsp), %r13
+ movq %r12, 8(%r14)
+ adcq 8(%rsp), %rbx # 8-byte Folded Reload
+ adcq 16(%rsp), %r13 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 16(%r15), %rdx
+ leaq 568(%rsp), %rdi
+ movq %r15, %rsi
+ callq .LmulPv576x64
+ movq 640(%rsp), %r8
+ movq 632(%rsp), %rcx
+ movq 624(%rsp), %rdx
+ movq 616(%rsp), %rsi
+ movq 608(%rsp), %rdi
+ movq 600(%rsp), %rbp
+ addq 568(%rsp), %rbx
+ movq 592(%rsp), %rax
+ movq 576(%rsp), %r14
+ movq 584(%rsp), %r12
+ movq 72(%rsp), %r15 # 8-byte Reload
+ movq %rbx, 16(%r15)
+ adcq %r13, %r14
+ adcq 16(%rsp), %r12 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 24(%rsi), %rdx
+ leaq 488(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 560(%rsp), %r8
+ movq 552(%rsp), %rcx
+ movq 544(%rsp), %rdx
+ movq 536(%rsp), %rsi
+ movq 528(%rsp), %rdi
+ movq 520(%rsp), %rbp
+ addq 488(%rsp), %r14
+ movq 512(%rsp), %rax
+ movq 496(%rsp), %rbx
+ movq 504(%rsp), %r13
+ movq %r14, 24(%r15)
+ adcq %r12, %rbx
+ adcq 16(%rsp), %r13 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 32(%rsi), %rdx
+ leaq 408(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 480(%rsp), %r8
+ movq 472(%rsp), %rcx
+ movq 464(%rsp), %rdx
+ movq 456(%rsp), %rsi
+ movq 448(%rsp), %rdi
+ movq 440(%rsp), %rbp
+ addq 408(%rsp), %rbx
+ movq 432(%rsp), %rax
+ movq 416(%rsp), %r14
+ movq 424(%rsp), %r12
+ movq %rbx, 32(%r15)
+ adcq %r13, %r14
+ adcq 16(%rsp), %r12 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 40(%rsi), %rdx
+ leaq 328(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 400(%rsp), %r8
+ movq 392(%rsp), %rcx
+ movq 384(%rsp), %rdx
+ movq 376(%rsp), %rsi
+ movq 368(%rsp), %rdi
+ movq 360(%rsp), %rbp
+ addq 328(%rsp), %r14
+ movq 352(%rsp), %rax
+ movq 336(%rsp), %rbx
+ movq 344(%rsp), %r13
+ movq %r14, 40(%r15)
+ adcq %r12, %rbx
+ adcq 16(%rsp), %r13 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 48(%rsi), %rdx
+ leaq 248(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 320(%rsp), %r8
+ movq 312(%rsp), %rcx
+ movq 304(%rsp), %rdx
+ movq 296(%rsp), %rsi
+ movq 288(%rsp), %rdi
+ movq 280(%rsp), %rbp
+ addq 248(%rsp), %rbx
+ movq 272(%rsp), %rax
+ movq 256(%rsp), %r12
+ movq 264(%rsp), %r14
+ movq %rbx, 48(%r15)
+ adcq %r13, %r12
+ adcq 16(%rsp), %r14 # 8-byte Folded Reload
+ adcq 24(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 16(%rsp) # 8-byte Spill
+ adcq 32(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%rsp) # 8-byte Spill
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rsp) # 8-byte Spill
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 56(%rsi), %rdx
+ leaq 168(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 240(%rsp), %r8
+ movq 232(%rsp), %rdx
+ movq 224(%rsp), %rsi
+ movq 216(%rsp), %rdi
+ movq 208(%rsp), %rbx
+ movq 200(%rsp), %rcx
+ addq 168(%rsp), %r12
+ movq 192(%rsp), %r15
+ movq 176(%rsp), %r13
+ movq 184(%rsp), %rbp
+ movq 72(%rsp), %rax # 8-byte Reload
+ movq %r12, 56(%rax)
+ adcq %r14, %r13
+ adcq 16(%rsp), %rbp # 8-byte Folded Reload
+ adcq 24(%rsp), %r15 # 8-byte Folded Reload
+ adcq 32(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, %r12
+ adcq 40(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, %r14
+ adcq 48(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 40(%rsp) # 8-byte Spill
+ adcq 56(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 48(%rsp) # 8-byte Spill
+ adcq 64(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 56(%rsp) # 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 64(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rsi # 8-byte Reload
+ movq 64(%rsi), %rdx
+ leaq 88(%rsp), %rdi
+ callq .LmulPv576x64
+ addq 88(%rsp), %r13
+ adcq 96(%rsp), %rbp
+ movq 160(%rsp), %r8
+ adcq 104(%rsp), %r15
+ movq 152(%rsp), %r9
+ movq 144(%rsp), %rdx
+ movq 136(%rsp), %rsi
+ movq 128(%rsp), %rdi
+ movq 120(%rsp), %rbx
+ movq 112(%rsp), %rax
+ movq 72(%rsp), %rcx # 8-byte Reload
+ movq %r13, 64(%rcx)
+ movq %rbp, 72(%rcx)
+ adcq %r12, %rax
+ movq %r15, 80(%rcx)
+ movq %rax, 88(%rcx)
+ adcq %r14, %rbx
+ movq %rbx, 96(%rcx)
+ adcq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 104(%rcx)
+ adcq 48(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 112(%rcx)
+ adcq 56(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 120(%rcx)
+ adcq 64(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 128(%rcx)
+ adcq $0, %r8
+ movq %r8, 136(%rcx)
+ addq $808, %rsp # imm = 0x328
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end129:
+ .size mcl_fpDbl_sqrPre9L, .Lfunc_end129-mcl_fpDbl_sqrPre9L
+
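+# mcl_fp_mont9L: 9-limb Montgomery multiplication. For each word of the
+# multiplier it interleaves a .LmulPv576x64 multiply with a reduction step
+# whose factor is loaded from -8(%rcx), then performs a final conditional
+# subtraction of the modulus.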
+ .globl mcl_fp_mont9L
+ .align 16, 0x90
+ .type mcl_fp_mont9L,@function
+mcl_fp_mont9L: # @mcl_fp_mont9L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1560, %rsp # imm = 0x618
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ movq %rdx, 32(%rsp) # 8-byte Spill
+ movq %rsi, 24(%rsp) # 8-byte Spill
+ movq %rdi, (%rsp) # 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 16(%rsp) # 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1480(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 1480(%rsp), %r14
+ movq 1488(%rsp), %r15
+ movq %r14, %rdx
+ imulq %rbx, %rdx
+ movq 1552(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ movq 1544(%rsp), %rax
+ movq %rax, 104(%rsp) # 8-byte Spill
+ movq 1536(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 1528(%rsp), %r12
+ movq 1520(%rsp), %r13
+ movq 1512(%rsp), %rbx
+ movq 1504(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 1496(%rsp), %rbp
+ leaq 1400(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 1400(%rsp), %r14
+ adcq 1408(%rsp), %r15
+ adcq 1416(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 1424(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ adcq 1432(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ adcq 1440(%rsp), %r13
+ movq %r13, 64(%rsp) # 8-byte Spill
+ adcq 1448(%rsp), %r12
+ movq %r12, 48(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rbx # 8-byte Reload
+ adcq 1456(%rsp), %rbx
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 1464(%rsp), %r14
+ movq 112(%rsp), %r13 # 8-byte Reload
+ adcq 1472(%rsp), %r13
+ sbbq %rbp, %rbp
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1320(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebp
+ addq 1320(%rsp), %r15
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 1328(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 1336(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r12 # 8-byte Reload
+ adcq 1344(%rsp), %r12
+ movq 64(%rsp), %rax # 8-byte Reload
+ adcq 1352(%rsp), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 1360(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ adcq 1368(%rsp), %rbx
+ adcq 1376(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ adcq 1384(%rsp), %r13
+ movq %r13, 112(%rsp) # 8-byte Spill
+ adcq 1392(%rsp), %rbp
+ sbbq %r14, %r14
+ movq %r15, %rdx
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 1240(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq %r14, %rax
+ andl $1, %eax
+ addq 1240(%rsp), %r15
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 1248(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r14 # 8-byte Reload
+ adcq 1256(%rsp), %r14
+ adcq 1264(%rsp), %r12
+ movq %r12, 40(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 1272(%rsp), %r12
+ movq 48(%rsp), %r13 # 8-byte Reload
+ adcq 1280(%rsp), %r13
+ adcq 1288(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r15 # 8-byte Reload
+ adcq 1296(%rsp), %r15
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq 1304(%rsp), %rbx
+ adcq 1312(%rsp), %rbp
+ adcq $0, %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 1160(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 96(%rsp), %rax # 8-byte Reload
+ addq 1160(%rsp), %rax
+ adcq 1168(%rsp), %r14
+ movq %r14, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r14 # 8-byte Reload
+ adcq 1176(%rsp), %r14
+ adcq 1184(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ movq %r13, %r12
+ adcq 1192(%rsp), %r12
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 1200(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ adcq 1208(%rsp), %r15
+ movq %r15, %r13
+ adcq 1216(%rsp), %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ adcq 1224(%rsp), %rbp
+ movq 72(%rsp), %rcx # 8-byte Reload
+ adcq 1232(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ sbbq %r15, %r15
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 1080(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq %r15, %rax
+ andl $1, %eax
+ addq 1080(%rsp), %rbx
+ movq 80(%rsp), %rcx # 8-byte Reload
+ adcq 1088(%rsp), %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ movq %r14, %r15
+ adcq 1096(%rsp), %r15
+ movq 64(%rsp), %r14 # 8-byte Reload
+ adcq 1104(%rsp), %r14
+ movq %r12, %rbx
+ adcq 1112(%rsp), %rbx
+ movq 88(%rsp), %rcx # 8-byte Reload
+ adcq 1120(%rsp), %rcx
+ movq %rcx, 88(%rsp) # 8-byte Spill
+ adcq 1128(%rsp), %r13
+ movq %r13, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %r13 # 8-byte Reload
+ adcq 1136(%rsp), %r13
+ adcq 1144(%rsp), %rbp
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 1152(%rsp), %r12
+ adcq $0, %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 1000(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 80(%rsp), %rax # 8-byte Reload
+ addq 1000(%rsp), %rax
+ adcq 1008(%rsp), %r15
+ movq %r15, 40(%rsp) # 8-byte Spill
+ adcq 1016(%rsp), %r14
+ movq %r14, %r15
+ adcq 1024(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 1032(%rsp), %r14
+ movq 104(%rsp), %rcx # 8-byte Reload
+ adcq 1040(%rsp), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ adcq 1048(%rsp), %r13
+ movq %r13, 112(%rsp) # 8-byte Spill
+ adcq 1056(%rsp), %rbp
+ adcq 1064(%rsp), %r12
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 1072(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 920(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 920(%rsp), %r13
+ movq 40(%rsp), %rcx # 8-byte Reload
+ adcq 928(%rsp), %rcx
+ movq %rcx, 40(%rsp) # 8-byte Spill
+ adcq 936(%rsp), %r15
+ movq %r15, 64(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r15 # 8-byte Reload
+ adcq 944(%rsp), %r15
+ movq %r14, %r13
+ adcq 952(%rsp), %r13
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 960(%rsp), %r14
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq 968(%rsp), %rbx
+ adcq 976(%rsp), %rbp
+ adcq 984(%rsp), %r12
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 992(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 840(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 40(%rsp), %rax # 8-byte Reload
+ addq 840(%rsp), %rax
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 848(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ adcq 856(%rsp), %r15
+ adcq 864(%rsp), %r13
+ movq %r13, 88(%rsp) # 8-byte Spill
+ adcq 872(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ adcq 880(%rsp), %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ adcq 888(%rsp), %rbp
+ adcq 896(%rsp), %r12
+ movq 96(%rsp), %r13 # 8-byte Reload
+ adcq 904(%rsp), %r13
+ movq 80(%rsp), %rcx # 8-byte Reload
+ adcq 912(%rsp), %rcx
+ movq %rcx, 80(%rsp) # 8-byte Spill
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 760(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 760(%rsp), %r14
+ movq 64(%rsp), %rcx # 8-byte Reload
+ adcq 768(%rsp), %rcx
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ adcq 776(%rsp), %r15
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 784(%rsp), %r14
+ movq 104(%rsp), %rcx # 8-byte Reload
+ adcq 792(%rsp), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rcx # 8-byte Reload
+ adcq 800(%rsp), %rcx
+ movq %rcx, 112(%rsp) # 8-byte Spill
+ adcq 808(%rsp), %rbp
+ movq %r12, %rbx
+ adcq 816(%rsp), %rbx
+ movq %r13, %r12
+ adcq 824(%rsp), %r12
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 832(%rsp), %r13
+ adcq $0, %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 680(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 64(%rsp), %rax # 8-byte Reload
+ addq 680(%rsp), %rax
+ adcq 688(%rsp), %r15
+ movq %r15, 48(%rsp) # 8-byte Spill
+ adcq 696(%rsp), %r14
+ movq %r14, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rcx # 8-byte Reload
+ adcq 704(%rsp), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %r15 # 8-byte Reload
+ adcq 712(%rsp), %r15
+ adcq 720(%rsp), %rbp
+ adcq 728(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 736(%rsp), %r12
+ movq %r12, 96(%rsp) # 8-byte Spill
+ adcq 744(%rsp), %r13
+ movq %r13, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r13 # 8-byte Reload
+ adcq 752(%rsp), %r13
+ sbbq %r14, %r14
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 600(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %r14d
+ addq 600(%rsp), %rbx
+ movq 48(%rsp), %rax # 8-byte Reload
+ adcq 608(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 616(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rbx # 8-byte Reload
+ adcq 624(%rsp), %rbx
+ adcq 632(%rsp), %r15
+ movq %r15, 112(%rsp) # 8-byte Spill
+ adcq 640(%rsp), %rbp
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 648(%rsp), %r12
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 656(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r15 # 8-byte Reload
+ adcq 664(%rsp), %r15
+ adcq 672(%rsp), %r13
+ adcq $0, %r14
+ movq %r14, 64(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 520(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 48(%rsp), %rax # 8-byte Reload
+ addq 520(%rsp), %rax
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 528(%rsp), %r14
+ adcq 536(%rsp), %rbx
+ movq %rbx, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rcx # 8-byte Reload
+ adcq 544(%rsp), %rcx
+ movq %rcx, 112(%rsp) # 8-byte Spill
+ adcq 552(%rsp), %rbp
+ adcq 560(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq 96(%rsp), %r12 # 8-byte Reload
+ adcq 568(%rsp), %r12
+ adcq 576(%rsp), %r15
+ movq %r15, 80(%rsp) # 8-byte Spill
+ adcq 584(%rsp), %r13
+ movq %r13, 40(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r15 # 8-byte Reload
+ adcq 592(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 440(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 440(%rsp), %r13
+ adcq 448(%rsp), %r14
+ movq %r14, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 456(%rsp), %r14
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq 464(%rsp), %rbx
+ adcq 472(%rsp), %rbp
+ movq %rbp, 8(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rcx # 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ adcq 488(%rsp), %r12
+ movq %r12, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 496(%rsp), %rbp
+ movq 40(%rsp), %r12 # 8-byte Reload
+ adcq 504(%rsp), %r12
+ adcq 512(%rsp), %r15
+ movq %r15, %r13
+ adcq $0, %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 360(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 88(%rsp), %rax # 8-byte Reload
+ addq 360(%rsp), %rax
+ adcq 368(%rsp), %r14
+ adcq 376(%rsp), %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ movq 8(%rsp), %rcx # 8-byte Reload
+ adcq 384(%rsp), %rcx
+ movq %rcx, 8(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 392(%rsp), %rbx
+ movq 96(%rsp), %r15 # 8-byte Reload
+ adcq 400(%rsp), %r15
+ adcq 408(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ adcq 416(%rsp), %r12
+ movq %r12, %rbp
+ adcq 424(%rsp), %r13
+ movq %r13, 64(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rcx # 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 48(%rsp) # 8-byte Spill
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 16(%rsp), %rdx # 8-byte Folded Reload
+ leaq 280(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %r13d
+ addq 280(%rsp), %r12
+ adcq 288(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rax # 8-byte Reload
+ adcq 296(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ movq 8(%rsp), %r14 # 8-byte Reload
+ adcq 304(%rsp), %r14
+ adcq 312(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 320(%rsp), %r15
+ movq %r15, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbx # 8-byte Reload
+ adcq 328(%rsp), %rbx
+ adcq 336(%rsp), %rbp
+ movq %rbp, 40(%rsp) # 8-byte Spill
+ movq 64(%rsp), %r12 # 8-byte Reload
+ adcq 344(%rsp), %r12
+ movq 48(%rsp), %rbp # 8-byte Reload
+ adcq 352(%rsp), %rbp
+ adcq $0, %r13
+ movq 32(%rsp), %rax # 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 200(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 104(%rsp), %rax # 8-byte Reload
+ addq 200(%rsp), %rax
+ movq 112(%rsp), %r15 # 8-byte Reload
+ adcq 208(%rsp), %r15
+ adcq 216(%rsp), %r14
+ movq %r14, 8(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r14 # 8-byte Reload
+ adcq 224(%rsp), %r14
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 232(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ adcq 240(%rsp), %rbx
+ movq %rbx, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %rcx # 8-byte Reload
+ adcq 248(%rsp), %rcx
+ movq %rcx, 40(%rsp) # 8-byte Spill
+ adcq 256(%rsp), %r12
+ movq %r12, 64(%rsp) # 8-byte Spill
+ adcq 264(%rsp), %rbp
+ movq %rbp, 48(%rsp) # 8-byte Spill
+ adcq 272(%rsp), %r13
+ sbbq %rbx, %rbx
+ movq 16(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %r12
+ leaq 120(%rsp), %rdi
+ movq 56(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %ebx
+ addq 120(%rsp), %r12
+ adcq 128(%rsp), %r15
+ movq 8(%rsp), %rbp # 8-byte Reload
+ adcq 136(%rsp), %rbp
+ movq %r14, %rcx
+ adcq 144(%rsp), %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ movq 96(%rsp), %r8 # 8-byte Reload
+ adcq 152(%rsp), %r8
+ movq %r8, 96(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r9 # 8-byte Reload
+ adcq 160(%rsp), %r9
+ movq %r9, 80(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r10 # 8-byte Reload
+ adcq 168(%rsp), %r10
+ movq %r10, 40(%rsp) # 8-byte Spill
+ movq 64(%rsp), %rdi # 8-byte Reload
+ adcq 176(%rsp), %rdi
+ movq %rdi, 64(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r14 # 8-byte Reload
+ adcq 184(%rsp), %r14
+ adcq 192(%rsp), %r13
+ adcq $0, %rbx
+ movq %r15, %rsi
+ movq %r15, %r12
+ movq 56(%rsp), %rdx # 8-byte Reload
+ subq (%rdx), %rsi
+ movq %rbp, %rax
+ movq %rbp, %r15
+ sbbq 8(%rdx), %rax
+ movq %rcx, %rbp
+ sbbq 16(%rdx), %rbp
+ movq %r8, %rcx
+ sbbq 24(%rdx), %rcx
+ movq %r9, %r8
+ sbbq 32(%rdx), %r8
+ movq %r10, %r11
+ sbbq 40(%rdx), %r11
+ movq %rdi, %r10
+ sbbq 48(%rdx), %r10
+ movq %r14, %rdi
+ sbbq 56(%rdx), %rdi
+ movq %r13, %r9
+ sbbq 64(%rdx), %r9
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %r13, %r9
+ testb %bl, %bl
+ cmovneq %r12, %rsi
+ movq (%rsp), %rbx # 8-byte Reload
+ movq %rsi, (%rbx)
+ cmovneq %r15, %rax
+ movq %rax, 8(%rbx)
+ cmovneq 72(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rbx)
+ cmovneq 96(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 24(%rbx)
+ cmovneq 80(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, 32(%rbx)
+ cmovneq 40(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 40(%rbx)
+ cmovneq 64(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 48(%rbx)
+ cmovneq %r14, %rdi
+ movq %rdi, 56(%rbx)
+ movq %r9, 64(%rbx)
+ addq $1560, %rsp # imm = 0x618
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end130:
+ .size mcl_fp_mont9L, .Lfunc_end130-mcl_fp_mont9L
+
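+# mcl_fp_montNF9L: the "NF" variant of the 9-limb Montgomery multiplication;
+# it follows the same interleaved multiply/reduce structure as mcl_fp_mont9L
+# but, as far as the rounds shown here go, without the per-round sbbq/andl
+# carry-word bookkeeping.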
+ .globl mcl_fp_montNF9L
+ .align 16, 0x90
+ .type mcl_fp_montNF9L,@function
+mcl_fp_montNF9L: # @mcl_fp_montNF9L
+# BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1560, %rsp # imm = 0x618
+ movq %rcx, 64(%rsp) # 8-byte Spill
+ movq %rdx, 16(%rsp) # 8-byte Spill
+ movq %rsi, 24(%rsp) # 8-byte Spill
+ movq %rdi, (%rsp) # 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 32(%rsp) # 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1480(%rsp), %rdi
+ callq .LmulPv576x64
+ movq 1480(%rsp), %r12
+ movq 1488(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq %r12, %rdx
+ imulq %rbx, %rdx
+ movq 1552(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ movq 1544(%rsp), %r13
+ movq 1536(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 1528(%rsp), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 1520(%rsp), %r14
+ movq 1512(%rsp), %r15
+ movq 1504(%rsp), %rbx
+ movq 1496(%rsp), %rbp
+ leaq 1400(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 1400(%rsp), %r12
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 1408(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ adcq 1416(%rsp), %rbp
+ movq %rbp, 8(%rsp) # 8-byte Spill
+ adcq 1424(%rsp), %rbx
+ movq %rbx, 104(%rsp) # 8-byte Spill
+ adcq 1432(%rsp), %r15
+ movq %r15, 56(%rsp) # 8-byte Spill
+ adcq 1440(%rsp), %r14
+ movq %r14, 40(%rsp) # 8-byte Spill
+ movq 48(%rsp), %rbx # 8-byte Reload
+ adcq 1448(%rsp), %rbx
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 1456(%rsp), %r12
+ adcq 1464(%rsp), %r13
+ movq %r13, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rbp # 8-byte Reload
+ adcq 1472(%rsp), %rbp
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1320(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 1392(%rsp), %rax
+ movq 88(%rsp), %rcx # 8-byte Reload
+ addq 1320(%rsp), %rcx
+ movq 8(%rsp), %r15 # 8-byte Reload
+ adcq 1328(%rsp), %r15
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 1336(%rsp), %r14
+ movq 56(%rsp), %rdx # 8-byte Reload
+ adcq 1344(%rsp), %rdx
+ movq %rdx, 56(%rsp) # 8-byte Spill
+ movq 40(%rsp), %r13 # 8-byte Reload
+ adcq 1352(%rsp), %r13
+ adcq 1360(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ adcq 1368(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq 96(%rsp), %rdx # 8-byte Reload
+ adcq 1376(%rsp), %rdx
+ movq %rdx, 96(%rsp) # 8-byte Spill
+ adcq 1384(%rsp), %rbp
+ movq %rbp, 112(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, %rbp
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 1240(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 1240(%rsp), %rbx
+ adcq 1248(%rsp), %r15
+ movq %r15, 8(%rsp) # 8-byte Spill
+ adcq 1256(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r12 # 8-byte Reload
+ adcq 1264(%rsp), %r12
+ adcq 1272(%rsp), %r13
+ movq %r13, %r14
+ movq 48(%rsp), %r13 # 8-byte Reload
+ adcq 1280(%rsp), %r13
+ movq 72(%rsp), %rbx # 8-byte Reload
+ adcq 1288(%rsp), %rbx
+ movq 96(%rsp), %r15 # 8-byte Reload
+ adcq 1296(%rsp), %r15
+ movq 112(%rsp), %rax # 8-byte Reload
+ adcq 1304(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ adcq 1312(%rsp), %rbp
+ movq %rbp, 80(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 1160(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 1232(%rsp), %rax
+ movq 8(%rsp), %rcx # 8-byte Reload
+ addq 1160(%rsp), %rcx
+ movq 104(%rsp), %rbp # 8-byte Reload
+ adcq 1168(%rsp), %rbp
+ adcq 1176(%rsp), %r12
+ movq %r12, 56(%rsp) # 8-byte Spill
+ adcq 1184(%rsp), %r14
+ adcq 1192(%rsp), %r13
+ movq %r13, %r12
+ adcq 1200(%rsp), %rbx
+ movq %rbx, 72(%rsp) # 8-byte Spill
+ adcq 1208(%rsp), %r15
+ movq %r15, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq 1216(%rsp), %rbx
+ movq 80(%rsp), %rdx # 8-byte Reload
+ adcq 1224(%rsp), %rdx
+ movq %rdx, 80(%rsp) # 8-byte Spill
+ movq %rax, %r15
+ adcq $0, %r15
+ movq %rcx, %rdx
+ movq %rcx, %r13
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 1080(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 1080(%rsp), %r13
+ adcq 1088(%rsp), %rbp
+ movq %rbp, 104(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r13 # 8-byte Reload
+ adcq 1096(%rsp), %r13
+ adcq 1104(%rsp), %r14
+ adcq 1112(%rsp), %r12
+ movq %r12, 48(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 1120(%rsp), %r12
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 1128(%rsp), %rbp
+ adcq 1136(%rsp), %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbx # 8-byte Reload
+ adcq 1144(%rsp), %rbx
+ adcq 1152(%rsp), %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 1000(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 1072(%rsp), %rax
+ movq 104(%rsp), %rcx # 8-byte Reload
+ addq 1000(%rsp), %rcx
+ adcq 1008(%rsp), %r13
+ movq %r13, 56(%rsp) # 8-byte Spill
+ adcq 1016(%rsp), %r14
+ movq %r14, 40(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r14 # 8-byte Reload
+ adcq 1024(%rsp), %r14
+ adcq 1032(%rsp), %r12
+ adcq 1040(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %r13 # 8-byte Reload
+ adcq 1048(%rsp), %r13
+ adcq 1056(%rsp), %rbx
+ movq %rbx, 80(%rsp) # 8-byte Spill
+ adcq 1064(%rsp), %r15
+ movq %r15, 88(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, 104(%rsp) # 8-byte Spill
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 920(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 920(%rsp), %rbx
+ movq 56(%rsp), %rax # 8-byte Reload
+ adcq 928(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 40(%rsp), %rbp # 8-byte Reload
+ adcq 936(%rsp), %rbp
+ movq %r14, %rbx
+ adcq 944(%rsp), %rbx
+ adcq 952(%rsp), %r12
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 960(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ adcq 968(%rsp), %r13
+ movq %r13, %r15
+ movq 80(%rsp), %r13 # 8-byte Reload
+ adcq 976(%rsp), %r13
+ movq 88(%rsp), %r14 # 8-byte Reload
+ adcq 984(%rsp), %r14
+ movq 104(%rsp), %rax # 8-byte Reload
+ adcq 992(%rsp), %rax
+ movq %rax, 104(%rsp) # 8-byte Spill
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 840(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 912(%rsp), %rax
+ movq 56(%rsp), %rcx # 8-byte Reload
+ addq 840(%rsp), %rcx
+ adcq 848(%rsp), %rbp
+ movq %rbp, 40(%rsp) # 8-byte Spill
+ adcq 856(%rsp), %rbx
+ movq %rbx, 48(%rsp) # 8-byte Spill
+ adcq 864(%rsp), %r12
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 872(%rsp), %rbp
+ adcq 880(%rsp), %r15
+ movq %r15, 112(%rsp) # 8-byte Spill
+ adcq 888(%rsp), %r13
+ adcq 896(%rsp), %r14
+ movq %r14, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rdx # 8-byte Reload
+ adcq 904(%rsp), %rdx
+ movq %rdx, 104(%rsp) # 8-byte Spill
+ adcq $0, %rax
+ movq %rax, %r14
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 760(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 760(%rsp), %rbx
+ movq 40(%rsp), %rax # 8-byte Reload
+ adcq 768(%rsp), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 48(%rsp), %r15 # 8-byte Reload
+ adcq 776(%rsp), %r15
+ adcq 784(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq %rbp, %rbx
+ adcq 792(%rsp), %rbx
+ movq 112(%rsp), %rbp # 8-byte Reload
+ adcq 800(%rsp), %rbp
+ adcq 808(%rsp), %r13
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 816(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r12 # 8-byte Reload
+ adcq 824(%rsp), %r12
+ adcq 832(%rsp), %r14
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 680(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 752(%rsp), %rcx
+ movq 40(%rsp), %rax # 8-byte Reload
+ addq 680(%rsp), %rax
+ adcq 688(%rsp), %r15
+ movq %r15, 48(%rsp) # 8-byte Spill
+ movq 72(%rsp), %rdx # 8-byte Reload
+ adcq 696(%rsp), %rdx
+ movq %rdx, 72(%rsp) # 8-byte Spill
+ adcq 704(%rsp), %rbx
+ movq %rbx, 96(%rsp) # 8-byte Spill
+ adcq 712(%rsp), %rbp
+ movq %rbp, 112(%rsp) # 8-byte Spill
+ adcq 720(%rsp), %r13
+ movq %r13, %r15
+ movq 88(%rsp), %rbx # 8-byte Reload
+ adcq 728(%rsp), %rbx
+ adcq 736(%rsp), %r12
+ movq %r12, 104(%rsp) # 8-byte Spill
+ adcq 744(%rsp), %r14
+ movq %r14, 40(%rsp) # 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 56(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 600(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 600(%rsp), %r13
+ movq 48(%rsp), %r13 # 8-byte Reload
+ adcq 608(%rsp), %r13
+ movq 72(%rsp), %r12 # 8-byte Reload
+ adcq 616(%rsp), %r12
+ movq 96(%rsp), %rbp # 8-byte Reload
+ adcq 624(%rsp), %rbp
+ movq 112(%rsp), %rax # 8-byte Reload
+ adcq 632(%rsp), %rax
+ movq %rax, 112(%rsp) # 8-byte Spill
+ adcq 640(%rsp), %r15
+ movq %r15, 80(%rsp) # 8-byte Spill
+ adcq 648(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq 656(%rsp), %r14
+ movq 40(%rsp), %rbx # 8-byte Reload
+ adcq 664(%rsp), %rbx
+ movq 56(%rsp), %r15 # 8-byte Reload
+ adcq 672(%rsp), %r15
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 520(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 592(%rsp), %rcx
+ movq %r13, %rax
+ addq 520(%rsp), %rax
+ adcq 528(%rsp), %r12
+ movq %r12, 72(%rsp) # 8-byte Spill
+ movq %rbp, %r12
+ adcq 536(%rsp), %r12
+ movq 112(%rsp), %rbp # 8-byte Reload
+ adcq 544(%rsp), %rbp
+ movq 80(%rsp), %rdx # 8-byte Reload
+ adcq 552(%rsp), %rdx
+ movq %rdx, 80(%rsp) # 8-byte Spill
+ movq 88(%rsp), %rdx # 8-byte Reload
+ adcq 560(%rsp), %rdx
+ movq %rdx, 88(%rsp) # 8-byte Spill
+ adcq 568(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ adcq 576(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ adcq 584(%rsp), %r15
+ movq %r15, 56(%rsp) # 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, %r13
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 440(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 440(%rsp), %r14
+ movq 72(%rsp), %rax # 8-byte Reload
+ adcq 448(%rsp), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ adcq 456(%rsp), %r12
+ adcq 464(%rsp), %rbp
+ movq %rbp, 112(%rsp) # 8-byte Spill
+ movq 80(%rsp), %r14 # 8-byte Reload
+ adcq 472(%rsp), %r14
+ movq 88(%rsp), %r15 # 8-byte Reload
+ adcq 480(%rsp), %r15
+ movq 104(%rsp), %rbp # 8-byte Reload
+ adcq 488(%rsp), %rbp
+ movq 40(%rsp), %rbx # 8-byte Reload
+ adcq 496(%rsp), %rbx
+ movq 56(%rsp), %rax # 8-byte Reload
+ adcq 504(%rsp), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ adcq 512(%rsp), %r13
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 360(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 432(%rsp), %rcx
+ movq 72(%rsp), %rax # 8-byte Reload
+ addq 360(%rsp), %rax
+ adcq 368(%rsp), %r12
+ movq %r12, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rdx # 8-byte Reload
+ adcq 376(%rsp), %rdx
+ movq %rdx, 112(%rsp) # 8-byte Spill
+ adcq 384(%rsp), %r14
+ movq %r14, 80(%rsp) # 8-byte Spill
+ adcq 392(%rsp), %r15
+ movq %r15, 88(%rsp) # 8-byte Spill
+ adcq 400(%rsp), %rbp
+ movq %rbp, 104(%rsp) # 8-byte Spill
+ adcq 408(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r14 # 8-byte Reload
+ adcq 416(%rsp), %r14
+ adcq 424(%rsp), %r13
+ movq %r13, %r15
+ adcq $0, %rcx
+ movq %rcx, 72(%rsp) # 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 32(%rsp), %rdx # 8-byte Folded Reload
+ leaq 280(%rsp), %rdi
+ movq 64(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 280(%rsp), %r12
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 288(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 112(%rsp), %rbp # 8-byte Reload
+ adcq 296(%rsp), %rbp
+ movq 80(%rsp), %rax # 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 80(%rsp) # 8-byte Spill
+ movq 88(%rsp), %r13 # 8-byte Reload
+ adcq 312(%rsp), %r13
+ movq 104(%rsp), %r12 # 8-byte Reload
+ adcq 320(%rsp), %r12
+ movq 40(%rsp), %rbx # 8-byte Reload
+ adcq 328(%rsp), %rbx
+ adcq 336(%rsp), %r14
+ movq %r14, 56(%rsp) # 8-byte Spill
+ adcq 344(%rsp), %r15
+ movq %r15, 48(%rsp) # 8-byte Spill
+ movq 72(%rsp), %r14 # 8-byte Reload
+ adcq 352(%rsp), %r14
+ movq 16(%rsp), %rax # 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 200(%rsp), %rdi
+ movq 24(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ movq 272(%rsp), %rcx
+ movq 96(%rsp), %rax # 8-byte Reload
+ addq 200(%rsp), %rax
+ adcq 208(%rsp), %rbp
+ movq %rbp, 112(%rsp) # 8-byte Spill
+ movq 80(%rsp), %rbp # 8-byte Reload
+ adcq 216(%rsp), %rbp
+ adcq 224(%rsp), %r13
+ movq %r13, 88(%rsp) # 8-byte Spill
+ adcq 232(%rsp), %r12
+ movq %r12, 104(%rsp) # 8-byte Spill
+ adcq 240(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ movq 56(%rsp), %r15 # 8-byte Reload
+ adcq 248(%rsp), %r15
+ movq 48(%rsp), %r12 # 8-byte Reload
+ adcq 256(%rsp), %r12
+ adcq 264(%rsp), %r14
+ adcq $0, %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ movq 32(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbx
+ leaq 120(%rsp), %rdi
+ movq 64(%rsp), %r13 # 8-byte Reload
+ movq %r13, %rsi
+ callq .LmulPv576x64
+ addq 120(%rsp), %rbx
+ movq 112(%rsp), %rcx # 8-byte Reload
+ adcq 128(%rsp), %rcx
+ movq %rbp, %rdx
+ adcq 136(%rsp), %rdx
+ movq 88(%rsp), %rsi # 8-byte Reload
+ adcq 144(%rsp), %rsi
+ movq %rsi, 88(%rsp) # 8-byte Spill
+ movq 104(%rsp), %rdi # 8-byte Reload
+ adcq 152(%rsp), %rdi
+ movq %rdi, 104(%rsp) # 8-byte Spill
+ movq 40(%rsp), %rbx # 8-byte Reload
+ adcq 160(%rsp), %rbx
+ movq %rbx, 40(%rsp) # 8-byte Spill
+ movq %r15, %r8
+ adcq 168(%rsp), %r8
+ movq %r8, 56(%rsp) # 8-byte Spill
+ movq %r12, %r15
+ adcq 176(%rsp), %r15
+ adcq 184(%rsp), %r14
+ movq 96(%rsp), %r9 # 8-byte Reload
+ adcq 192(%rsp), %r9
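+ # conditional final subtraction: compute r - p below and keep r - p unless it underflows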
+ movq %rcx, %rax
+ movq %rcx, %r11
+ movq %r13, %rbp
+ subq (%rbp), %rax
+ movq %rdx, %rcx
+ movq %rdx, %r12
+ sbbq 8(%rbp), %rcx
+ movq %rsi, %rdx
+ sbbq 16(%rbp), %rdx
+ movq %rdi, %rsi
+ sbbq 24(%rbp), %rsi
+ movq %rbx, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %r8, %r10
+ sbbq 40(%rbp), %r10
+ movq %r15, %r13
+ sbbq 48(%rbp), %r13
+ movq %r14, %r8
+ sbbq 56(%rbp), %r8
+ movq %rbp, %rbx
+ movq %r9, %rbp
+ sbbq 64(%rbx), %rbp
+ movq %rbp, %rbx
+ sarq $63, %rbx
+ cmovsq %r11, %rax
+ movq (%rsp), %rbx # 8-byte Reload
+ movq %rax, (%rbx)
+ cmovsq %r12, %rcx
+ movq %rcx, 8(%rbx)
+ cmovsq 88(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 16(%rbx)
+ cmovsq 104(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovsq 40(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 32(%rbx)
+ cmovsq 56(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 40(%rbx)
+ cmovsq %r15, %r13
+ movq %r13, 48(%rbx)
+ cmovsq %r14, %r8
+ movq %r8, 56(%rbx)
+ cmovsq %r9, %rbp
+ movq %rbp, 64(%rbx)
+ addq $1560, %rsp # imm = 0x618
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end131:
+ .size mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L
+
+ .globl mcl_fp_montRed9L
+ .align 16, 0x90
+ .type mcl_fp_montRed9L,@function
+mcl_fp_montRed9L: # @mcl_fp_montRed9L
+# BB#0:
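+ # Montgomery reduction of the 18-limb value at %rsi by the 9-limb modulus at %rdx:
+ # nine rounds of m = t[0] * n' (n' loaded from -8(%rdx)), each adding m*p via .LmulPv576x64,
+ # followed by a final conditional subtraction of p before the 9-limb result is stored at %rdi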
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $936, %rsp # imm = 0x3A8
+ movq %rdx, %rax
+ movq %rax, 128(%rsp) # 8-byte Spill
+ movq %rdi, 80(%rsp) # 8-byte Spill
+ movq -8(%rax), %rcx
+ movq %rcx, 120(%rsp) # 8-byte Spill
+ movq (%rsi), %r14
+ movq 8(%rsi), %rdx
+ movq %rdx, 192(%rsp) # 8-byte Spill
+ movq %r14, %rdx
+ imulq %rcx, %rdx
+ movq 136(%rsi), %rcx
+ movq %rcx, 112(%rsp) # 8-byte Spill
+ movq 128(%rsi), %rcx
+ movq %rcx, 152(%rsp) # 8-byte Spill
+ movq 120(%rsi), %rcx
+ movq %rcx, 104(%rsp) # 8-byte Spill
+ movq 112(%rsi), %rcx
+ movq %rcx, 144(%rsp) # 8-byte Spill
+ movq 104(%rsi), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ movq 96(%rsi), %rcx
+ movq %rcx, 208(%rsp) # 8-byte Spill
+ movq 88(%rsi), %rcx
+ movq %rcx, 200(%rsp) # 8-byte Spill
+ movq 80(%rsi), %rcx
+ movq %rcx, 160(%rsp) # 8-byte Spill
+ movq 72(%rsi), %r12
+ movq 64(%rsi), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 56(%rsi), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ movq 48(%rsi), %rcx
+ movq %rcx, 136(%rsp) # 8-byte Spill
+ movq 40(%rsi), %rbp
+ movq 32(%rsi), %rbx
+ movq 24(%rsi), %r13
+ movq 16(%rsi), %r15
+ movq %rax, %rcx
+ movq (%rcx), %rax
+ movq %rax, 16(%rsp) # 8-byte Spill
+ movq 64(%rcx), %rax
+ movq %rax, 72(%rsp) # 8-byte Spill
+ movq 56(%rcx), %rax
+ movq %rax, 64(%rsp) # 8-byte Spill
+ movq 48(%rcx), %rax
+ movq %rax, 56(%rsp) # 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, 48(%rsp) # 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, 40(%rsp) # 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, 32(%rsp) # 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, 24(%rsp) # 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, 8(%rsp) # 8-byte Spill
+ movq %rcx, %rsi
+ leaq 856(%rsp), %rdi
+ callq .LmulPv576x64
+ addq 856(%rsp), %r14
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 864(%rsp), %rcx
+ adcq 872(%rsp), %r15
+ adcq 880(%rsp), %r13
+ adcq 888(%rsp), %rbx
+ movq %rbx, 88(%rsp) # 8-byte Spill
+ adcq 896(%rsp), %rbp
+ movq %rbp, 96(%rsp) # 8-byte Spill
+ movq 136(%rsp), %rax # 8-byte Reload
+ adcq 904(%rsp), %rax
+ movq %rax, 136(%rsp) # 8-byte Spill
+ movq 168(%rsp), %rax # 8-byte Reload
+ adcq 912(%rsp), %rax
+ movq %rax, 168(%rsp) # 8-byte Spill
+ movq 176(%rsp), %rax # 8-byte Reload
+ adcq 920(%rsp), %rax
+ movq %rax, 176(%rsp) # 8-byte Spill
+ adcq 928(%rsp), %r12
+ movq %r12, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rbp # 8-byte Reload
+ adcq $0, %rbp
+ adcq $0, 200(%rsp) # 8-byte Folded Spill
+ adcq $0, 208(%rsp) # 8-byte Folded Spill
+ adcq $0, 184(%rsp) # 8-byte Folded Spill
+ adcq $0, 144(%rsp) # 8-byte Folded Spill
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ adcq $0, 152(%rsp) # 8-byte Folded Spill
+ movq 112(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ sbbq %r12, %r12
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 776(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ andl $1, %r12d
+ addq 776(%rsp), %rbx
+ adcq 784(%rsp), %r15
+ adcq 792(%rsp), %r13
+ movq %r13, (%rsp) # 8-byte Spill
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 800(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 808(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 136(%rsp), %rax # 8-byte Reload
+ adcq 816(%rsp), %rax
+ movq %rax, 136(%rsp) # 8-byte Spill
+ movq 168(%rsp), %rax # 8-byte Reload
+ adcq 824(%rsp), %rax
+ movq %rax, 168(%rsp) # 8-byte Spill
+ movq 176(%rsp), %rax # 8-byte Reload
+ adcq 832(%rsp), %rax
+ movq %rax, 176(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rax # 8-byte Reload
+ adcq 840(%rsp), %rax
+ movq %rax, 192(%rsp) # 8-byte Spill
+ adcq 848(%rsp), %rbp
+ movq %rbp, 160(%rsp) # 8-byte Spill
+ movq 200(%rsp), %r13 # 8-byte Reload
+ adcq $0, %r13
+ adcq $0, 208(%rsp) # 8-byte Folded Spill
+ adcq $0, 184(%rsp) # 8-byte Folded Spill
+ adcq $0, 144(%rsp) # 8-byte Folded Spill
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ movq 152(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, %r14
+ movq %r14, 112(%rsp) # 8-byte Spill
+ adcq $0, %r12
+ movq %r15, %rdx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 696(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 696(%rsp), %r15
+ movq (%rsp), %rcx # 8-byte Reload
+ adcq 704(%rsp), %rcx
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 712(%rsp), %rax
+ movq %rax, 88(%rsp) # 8-byte Spill
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 720(%rsp), %rax
+ movq %rax, 96(%rsp) # 8-byte Spill
+ movq 136(%rsp), %rbp # 8-byte Reload
+ adcq 728(%rsp), %rbp
+ movq 168(%rsp), %r14 # 8-byte Reload
+ adcq 736(%rsp), %r14
+ movq 176(%rsp), %r15 # 8-byte Reload
+ adcq 744(%rsp), %r15
+ movq 192(%rsp), %rax # 8-byte Reload
+ adcq 752(%rsp), %rax
+ movq %rax, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rax # 8-byte Reload
+ adcq 760(%rsp), %rax
+ movq %rax, 160(%rsp) # 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 200(%rsp) # 8-byte Spill
+ adcq $0, 208(%rsp) # 8-byte Folded Spill
+ movq 184(%rsp), %r13 # 8-byte Reload
+ adcq $0, %r13
+ adcq $0, 144(%rsp) # 8-byte Folded Spill
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 152(%rsp) # 8-byte Spill
+ adcq $0, 112(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rcx, %rbx
+ movq %rbx, %rdx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 616(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 616(%rsp), %rbx
+ movq 88(%rsp), %rax # 8-byte Reload
+ adcq 624(%rsp), %rax
+ movq 96(%rsp), %rcx # 8-byte Reload
+ adcq 632(%rsp), %rcx
+ movq %rcx, 96(%rsp) # 8-byte Spill
+ adcq 640(%rsp), %rbp
+ movq %rbp, 136(%rsp) # 8-byte Spill
+ adcq 648(%rsp), %r14
+ movq %r14, 168(%rsp) # 8-byte Spill
+ adcq 656(%rsp), %r15
+ movq 192(%rsp), %r14 # 8-byte Reload
+ adcq 664(%rsp), %r14
+ movq 160(%rsp), %rbp # 8-byte Reload
+ adcq 672(%rsp), %rbp
+ movq 200(%rsp), %rcx # 8-byte Reload
+ adcq 680(%rsp), %rcx
+ movq %rcx, 200(%rsp) # 8-byte Spill
+ movq 208(%rsp), %rcx # 8-byte Reload
+ adcq 688(%rsp), %rcx
+ movq %rcx, 208(%rsp) # 8-byte Spill
+ adcq $0, %r13
+ movq %r13, 184(%rsp) # 8-byte Spill
+ adcq $0, 144(%rsp) # 8-byte Folded Spill
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ adcq $0, 152(%rsp) # 8-byte Folded Spill
+ adcq $0, 112(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 536(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 536(%rsp), %rbx
+ movq 96(%rsp), %rax # 8-byte Reload
+ adcq 544(%rsp), %rax
+ movq 136(%rsp), %rcx # 8-byte Reload
+ adcq 552(%rsp), %rcx
+ movq %rcx, 136(%rsp) # 8-byte Spill
+ movq 168(%rsp), %rcx # 8-byte Reload
+ adcq 560(%rsp), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ adcq 568(%rsp), %r15
+ movq %r15, 176(%rsp) # 8-byte Spill
+ adcq 576(%rsp), %r14
+ movq %r14, 192(%rsp) # 8-byte Spill
+ adcq 584(%rsp), %rbp
+ movq %rbp, 160(%rsp) # 8-byte Spill
+ movq 200(%rsp), %r13 # 8-byte Reload
+ adcq 592(%rsp), %r13
+ movq 208(%rsp), %r15 # 8-byte Reload
+ adcq 600(%rsp), %r15
+ movq 184(%rsp), %rbp # 8-byte Reload
+ adcq 608(%rsp), %rbp
+ movq 144(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, 104(%rsp) # 8-byte Folded Spill
+ adcq $0, 152(%rsp) # 8-byte Folded Spill
+ adcq $0, 112(%rsp) # 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 456(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 456(%rsp), %r14
+ movq 136(%rsp), %rax # 8-byte Reload
+ adcq 464(%rsp), %rax
+ movq 168(%rsp), %rcx # 8-byte Reload
+ adcq 472(%rsp), %rcx
+ movq %rcx, 168(%rsp) # 8-byte Spill
+ movq 176(%rsp), %rcx # 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 488(%rsp), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rcx # 8-byte Reload
+ adcq 496(%rsp), %rcx
+ movq %rcx, 160(%rsp) # 8-byte Spill
+ adcq 504(%rsp), %r13
+ movq %r13, 200(%rsp) # 8-byte Spill
+ adcq 512(%rsp), %r15
+ movq %r15, 208(%rsp) # 8-byte Spill
+ adcq 520(%rsp), %rbp
+ movq %rbp, 184(%rsp) # 8-byte Spill
+ adcq 528(%rsp), %rbx
+ movq %rbx, 144(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ movq 152(%rsp), %r13 # 8-byte Reload
+ adcq $0, %r13
+ movq 112(%rsp), %rbx # 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, %r12
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 376(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 376(%rsp), %r15
+ movq 168(%rsp), %rax # 8-byte Reload
+ adcq 384(%rsp), %rax
+ movq 176(%rsp), %rcx # 8-byte Reload
+ adcq 392(%rsp), %rcx
+ movq %rcx, 176(%rsp) # 8-byte Spill
+ movq 192(%rsp), %rcx # 8-byte Reload
+ adcq 400(%rsp), %rcx
+ movq %rcx, 192(%rsp) # 8-byte Spill
+ movq 160(%rsp), %rbp # 8-byte Reload
+ adcq 408(%rsp), %rbp
+ movq 200(%rsp), %rcx # 8-byte Reload
+ adcq 416(%rsp), %rcx
+ movq %rcx, 200(%rsp) # 8-byte Spill
+ movq 208(%rsp), %rcx # 8-byte Reload
+ adcq 424(%rsp), %rcx
+ movq %rcx, 208(%rsp) # 8-byte Spill
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ movq 144(%rsp), %r15 # 8-byte Reload
+ adcq 440(%rsp), %r15
+ adcq 448(%rsp), %r14
+ movq %r14, 104(%rsp) # 8-byte Spill
+ adcq $0, %r13
+ movq %r13, %r14
+ adcq $0, %rbx
+ movq %rbx, 112(%rsp) # 8-byte Spill
+ adcq $0, %r12
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 120(%rsp), %rdx # 8-byte Folded Reload
+ leaq 296(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 296(%rsp), %rbx
+ movq 176(%rsp), %rax # 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq 192(%rsp), %r13 # 8-byte Reload
+ adcq 312(%rsp), %r13
+ adcq 320(%rsp), %rbp
+ movq 200(%rsp), %rcx # 8-byte Reload
+ adcq 328(%rsp), %rcx
+ movq %rcx, 200(%rsp) # 8-byte Spill
+ movq 208(%rsp), %rcx # 8-byte Reload
+ adcq 336(%rsp), %rcx
+ movq %rcx, 208(%rsp) # 8-byte Spill
+ movq 184(%rsp), %rcx # 8-byte Reload
+ adcq 344(%rsp), %rcx
+ movq %rcx, 184(%rsp) # 8-byte Spill
+ adcq 352(%rsp), %r15
+ movq %r15, 144(%rsp) # 8-byte Spill
+ movq 104(%rsp), %r15 # 8-byte Reload
+ adcq 360(%rsp), %r15
+ adcq 368(%rsp), %r14
+ movq %r14, 152(%rsp) # 8-byte Spill
+ movq 112(%rsp), %r14 # 8-byte Reload
+ adcq $0, %r14
+ adcq $0, %r12
+ movq 120(%rsp), %rdx # 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbx
+ leaq 216(%rsp), %rdi
+ movq 128(%rsp), %rsi # 8-byte Reload
+ callq .LmulPv576x64
+ addq 216(%rsp), %rbx
+ movq %r13, %rsi
+ adcq 224(%rsp), %rsi
+ movq %rsi, 192(%rsp) # 8-byte Spill
+ adcq 232(%rsp), %rbp
+ movq %rbp, 160(%rsp) # 8-byte Spill
+ movq 200(%rsp), %r9 # 8-byte Reload
+ adcq 240(%rsp), %r9
+ movq %r9, 200(%rsp) # 8-byte Spill
+ movq 208(%rsp), %r8 # 8-byte Reload
+ adcq 248(%rsp), %r8
+ movq %r8, 208(%rsp) # 8-byte Spill
+ movq 184(%rsp), %rbx # 8-byte Reload
+ adcq 256(%rsp), %rbx
+ movq 144(%rsp), %rax # 8-byte Reload
+ adcq 264(%rsp), %rax
+ movq %r15, %rcx
+ adcq 272(%rsp), %rcx
+ movq 152(%rsp), %rdx # 8-byte Reload
+ adcq 280(%rsp), %rdx
+ movq %rdx, 152(%rsp) # 8-byte Spill
+ adcq 288(%rsp), %r14
+ movq %r14, %r11
+ adcq $0, %r12
+ subq 16(%rsp), %rsi # 8-byte Folded Reload
+ movq %rbp, %rdi
+ sbbq 8(%rsp), %rdi # 8-byte Folded Reload
+ movq %r9, %rbp
+ sbbq 24(%rsp), %rbp # 8-byte Folded Reload
+ movq %r8, %r13
+ sbbq 32(%rsp), %r13 # 8-byte Folded Reload
+ movq %rbx, %r15
+ sbbq 40(%rsp), %r15 # 8-byte Folded Reload
+ movq %rax, %r14
+ sbbq 48(%rsp), %r14 # 8-byte Folded Reload
+ movq %rcx, %r10
+ sbbq 56(%rsp), %r10 # 8-byte Folded Reload
+ movq %rdx, %r8
+ sbbq 64(%rsp), %r8 # 8-byte Folded Reload
+ movq %r11, %r9
+ sbbq 72(%rsp), %r9 # 8-byte Folded Reload
+ sbbq $0, %r12
+ andl $1, %r12d
+ cmovneq %r11, %r9
+ testb %r12b, %r12b
+ cmovneq 192(%rsp), %rsi # 8-byte Folded Reload
+ movq 80(%rsp), %rdx # 8-byte Reload
+ movq %rsi, (%rdx)
+ cmovneq 160(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 8(%rdx)
+ cmovneq 200(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 16(%rdx)
+ cmovneq 208(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, 24(%rdx)
+ cmovneq %rbx, %r15
+ movq %r15, 32(%rdx)
+ cmovneq %rax, %r14
+ movq %r14, 40(%rdx)
+ cmovneq %rcx, %r10
+ movq %r10, 48(%rdx)
+ cmovneq 152(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, 56(%rdx)
+ movq %r9, 64(%rdx)
+ addq $936, %rsp # imm = 0x3A8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end132:
+ .size mcl_fp_montRed9L, .Lfunc_end132-mcl_fp_montRed9L
+
+ .globl mcl_fp_addPre9L
+ .align 16, 0x90
+ .type mcl_fp_addPre9L,@function
+mcl_fp_addPre9L: # @mcl_fp_addPre9L
+# BB#0:
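+ # 9-limb (576-bit) addition with no reduction: *%rdi = *%rsi + *%rdx; carry-out returned in %rax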
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r8
+ movq 64(%rsi), %r15
+ movq 56(%rsi), %r9
+ movq 48(%rsi), %r10
+ movq 40(%rsi), %r11
+ movq 24(%rsi), %r12
+ movq 32(%rsi), %r14
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rcx
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rcx
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r12
+ movq 56(%rdx), %r13
+ movq 48(%rdx), %rsi
+ movq 40(%rdx), %rbp
+ movq 32(%rdx), %rdx
+ movq %rbx, (%rdi)
+ movq %rcx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r12, 24(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 32(%rdi)
+ adcq %r11, %rbp
+ movq %rbp, 40(%rdi)
+ adcq %r10, %rsi
+ movq %rsi, 48(%rdi)
+ adcq %r9, %r13
+ movq %r13, 56(%rdi)
+ adcq %r8, %r15
+ movq %r15, 64(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end133:
+ .size mcl_fp_addPre9L, .Lfunc_end133-mcl_fp_addPre9L
+
+ .globl mcl_fp_subPre9L
+ .align 16, 0x90
+ .type mcl_fp_subPre9L,@function
+mcl_fp_subPre9L: # @mcl_fp_subPre9L
+# BB#0:
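+ # 9-limb subtraction with no reduction: *%rdi = *%rsi - *%rdx; borrow returned in %rax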
+ movq 32(%rdx), %r8
+ movq (%rsi), %rcx
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ movq %rcx, (%rdi)
+ movq 8(%rsi), %rcx
+ sbbq 8(%rdx), %rcx
+ movq %rcx, 8(%rdi)
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq %rcx, 16(%rdi)
+ movq 24(%rsi), %rcx
+ sbbq 24(%rdx), %rcx
+ movq %rcx, 24(%rdi)
+ movq 32(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 40(%rdx), %r8
+ movq %rcx, 32(%rdi)
+ movq 40(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 48(%rdx), %r8
+ movq %rcx, 40(%rdi)
+ movq 48(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 56(%rdx), %r8
+ movq %rcx, 48(%rdi)
+ movq 56(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq %rcx, 56(%rdi)
+ movq 64(%rdx), %rcx
+ movq 64(%rsi), %rdx
+ sbbq %rcx, %rdx
+ movq %rdx, 64(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+.Lfunc_end134:
+ .size mcl_fp_subPre9L, .Lfunc_end134-mcl_fp_subPre9L
+
+ .globl mcl_fp_shr1_9L
+ .align 16, 0x90
+ .type mcl_fp_shr1_9L,@function
+mcl_fp_shr1_9L: # @mcl_fp_shr1_9L
+# BB#0:
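+ # logical right shift of the 9-limb value at %rsi by one bit, result stored at %rdi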
+ pushq %rbx
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %r9
+ movq 48(%rsi), %r10
+ movq 40(%rsi), %r11
+ movq 32(%rsi), %rcx
+ movq 24(%rsi), %rdx
+ movq 16(%rsi), %rax
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rbx
+ movq %rbx, (%rdi)
+ shrdq $1, %rax, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rdx, %rax
+ movq %rax, 16(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 24(%rdi)
+ shrdq $1, %r11, %rcx
+ movq %rcx, 32(%rdi)
+ shrdq $1, %r10, %r11
+ movq %r11, 40(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 48(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 56(%rdi)
+ shrq %r8
+ movq %r8, 64(%rdi)
+ popq %rbx
+ retq
+.Lfunc_end135:
+ .size mcl_fp_shr1_9L, .Lfunc_end135-mcl_fp_shr1_9L
+
+ .globl mcl_fp_add9L
+ .align 16, 0x90
+ .type mcl_fp_add9L,@function
+mcl_fp_add9L: # @mcl_fp_add9L
+# BB#0:
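+ # modular addition: store the raw 9-limb sum, then subtract the modulus at %rcx;
+ # if that subtraction does not borrow, the reduced value overwrites the output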
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r12
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %r13
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r10
+ movq 24(%rsi), %r14
+ movq 32(%rsi), %r11
+ movq (%rdx), %rbx
+ movq 8(%rdx), %r15
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %r15
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r14
+ adcq 32(%rdx), %r11
+ adcq 40(%rdx), %r10
+ movq 56(%rdx), %rsi
+ adcq 48(%rdx), %r9
+ movq %rbx, (%rdi)
+ movq %r15, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r14, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ adcq %r13, %rsi
+ movq %rsi, 56(%rdi)
+ adcq %r12, %r8
+ movq %r8, 64(%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %rbx
+ sbbq 8(%rcx), %r15
+ sbbq 16(%rcx), %rax
+ sbbq 24(%rcx), %r14
+ sbbq 32(%rcx), %r11
+ sbbq 40(%rcx), %r10
+ sbbq 48(%rcx), %r9
+ sbbq 56(%rcx), %rsi
+ sbbq 64(%rcx), %r8
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne .LBB136_2
+# BB#1: # %nocarry
+ movq %rbx, (%rdi)
+ movq %r15, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r14, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %rsi, 56(%rdi)
+ movq %r8, 64(%rdi)
+.LBB136_2: # %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end136:
+ .size mcl_fp_add9L, .Lfunc_end136-mcl_fp_add9L
+
+ .globl mcl_fp_addNF9L
+ .align 16, 0x90
+ .type mcl_fp_addNF9L,@function
+mcl_fp_addNF9L: # @mcl_fp_addNF9L
+# BB#0:
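+ # branch-free modular addition: compute a+b and (a+b)-p, then cmovs-select a+b
+ # whenever the subtraction of the modulus at %rcx underflows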
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, %r8
+ movq 64(%rdx), %r10
+ movq 56(%rdx), %r11
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rax
+ movq 32(%rdx), %rdi
+ movq 24(%rdx), %rbp
+ movq 16(%rdx), %r15
+ movq (%rdx), %rbx
+ movq 8(%rdx), %r13
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %r13
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %rbp
+ movq %rbp, -40(%rsp) # 8-byte Spill
+ adcq 32(%rsi), %rdi
+ movq %rdi, -16(%rsp) # 8-byte Spill
+ adcq 40(%rsi), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ adcq 48(%rsi), %r9
+ movq %r9, -32(%rsp) # 8-byte Spill
+ movq %r9, %rdi
+ adcq 56(%rsi), %r11
+ movq %r11, -24(%rsp) # 8-byte Spill
+ movq %r11, %rax
+ adcq 64(%rsi), %r10
+ movq %r10, %r9
+ movq %rbx, %rsi
+ subq (%rcx), %rsi
+ movq %r13, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r15, %r12
+ sbbq 16(%rcx), %r12
+ sbbq 24(%rcx), %rbp
+ movq -16(%rsp), %r14 # 8-byte Reload
+ sbbq 32(%rcx), %r14
+ movq -8(%rsp), %r11 # 8-byte Reload
+ sbbq 40(%rcx), %r11
+ movq %rdi, %r10
+ sbbq 48(%rcx), %r10
+ movq %rax, %rdi
+ sbbq 56(%rcx), %rdi
+ movq %r9, %rax
+ sbbq 64(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %rbx, %rsi
+ movq %rsi, (%r8)
+ cmovsq %r13, %rdx
+ movq %rdx, 8(%r8)
+ cmovsq %r15, %r12
+ movq %r12, 16(%r8)
+ cmovsq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rbp, 24(%r8)
+ cmovsq -16(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, 32(%r8)
+ cmovsq -8(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 40(%r8)
+ cmovsq -32(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 48(%r8)
+ cmovsq -24(%rsp), %rdi # 8-byte Folded Reload
+ movq %rdi, 56(%r8)
+ cmovsq %r9, %rax
+ movq %rax, 64(%r8)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end137:
+ .size mcl_fp_addNF9L, .Lfunc_end137-mcl_fp_addNF9L
+
+ .globl mcl_fp_sub9L
+ .align 16, 0x90
+ .type mcl_fp_sub9L,@function
+mcl_fp_sub9L: # @mcl_fp_sub9L
+# BB#0:
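+ # modular subtraction: store a-b, and if the subtraction borrowed add the modulus
+ # at %rcx back in (the carry block below)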
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r13
+ movq (%rsi), %rax
+ movq 8(%rsi), %r9
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r9
+ movq 16(%rsi), %r10
+ sbbq 16(%rdx), %r10
+ movq 24(%rsi), %r11
+ sbbq 24(%rdx), %r11
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 40(%rsi), %r14
+ sbbq 40(%rdx), %r14
+ movq 48(%rsi), %r15
+ sbbq 48(%rdx), %r15
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %rsi
+ sbbq 56(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r12, 32(%rdi)
+ movq %r14, 40(%rdi)
+ movq %r15, 48(%rdi)
+ movq %rsi, 56(%rdi)
+ sbbq %r13, %r8
+ movq %r8, 64(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je .LBB138_2
+# BB#1: # %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ movq 8(%rcx), %rax
+ adcq %r9, %rax
+ movq %rax, 8(%rdi)
+ movq 16(%rcx), %rax
+ adcq %r10, %rax
+ movq %rax, 16(%rdi)
+ movq 24(%rcx), %rax
+ adcq %r11, %rax
+ movq %rax, 24(%rdi)
+ movq 32(%rcx), %rax
+ adcq %r12, %rax
+ movq %rax, 32(%rdi)
+ movq 40(%rcx), %rax
+ adcq %r14, %rax
+ movq %rax, 40(%rdi)
+ movq 48(%rcx), %rax
+ adcq %r15, %rax
+ movq %rax, 48(%rdi)
+ movq 56(%rcx), %rax
+ adcq %rsi, %rax
+ movq %rax, 56(%rdi)
+ movq 64(%rcx), %rax
+ adcq %r8, %rax
+ movq %rax, 64(%rdi)
+.LBB138_2: # %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+.Lfunc_end138:
+ .size mcl_fp_sub9L, .Lfunc_end138-mcl_fp_sub9L
+
+ .globl mcl_fp_subNF9L
+ .align 16, 0x90
+ .type mcl_fp_subNF9L,@function
+mcl_fp_subNF9L: # @mcl_fp_subNF9L
+# BB#0:
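+ # branch-free modular subtraction: compute a-b, build an all-ones/zero mask from its
+ # sign, and add the modulus at %rcx masked by that sign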
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq %rdi, %r11
+ movq 64(%rsi), %r14
+ movq 56(%rsi), %rax
+ movq 48(%rsi), %rcx
+ movq 40(%rsi), %rdi
+ movq 32(%rsi), %rbp
+ movq 24(%rsi), %rbx
+ movq 16(%rsi), %r15
+ movq (%rsi), %r12
+ movq 8(%rsi), %r13
+ subq (%rdx), %r12
+ sbbq 8(%rdx), %r13
+ sbbq 16(%rdx), %r15
+ sbbq 24(%rdx), %rbx
+ movq %rbx, -40(%rsp) # 8-byte Spill
+ sbbq 32(%rdx), %rbp
+ movq %rbp, -32(%rsp) # 8-byte Spill
+ sbbq 40(%rdx), %rdi
+ movq %rdi, -24(%rsp) # 8-byte Spill
+ sbbq 48(%rdx), %rcx
+ movq %rcx, -16(%rsp) # 8-byte Spill
+ sbbq 56(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ sbbq 64(%rdx), %r14
+ movq %r14, %rax
+ sarq $63, %rax
+ movq %rax, %rcx
+ shldq $1, %r14, %rcx
+ movq 24(%r8), %rbp
+ andq %rcx, %rbp
+ movq 8(%r8), %rdi
+ andq %rcx, %rdi
+ andq (%r8), %rcx
+ movq 64(%r8), %rbx
+ andq %rax, %rbx
+ movq 56(%r8), %r10
+ andq %rax, %r10
+ rolq %rax
+ movq 48(%r8), %r9
+ andq %rax, %r9
+ movq 40(%r8), %rsi
+ andq %rax, %rsi
+ movq 32(%r8), %rdx
+ andq %rax, %rdx
+ andq 16(%r8), %rax
+ addq %r12, %rcx
+ adcq %r13, %rdi
+ movq %rcx, (%r11)
+ adcq %r15, %rax
+ movq %rdi, 8(%r11)
+ adcq -40(%rsp), %rbp # 8-byte Folded Reload
+ movq %rax, 16(%r11)
+ movq %rbp, 24(%r11)
+ adcq -32(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, 32(%r11)
+ adcq -24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 40(%r11)
+ adcq -16(%rsp), %r9 # 8-byte Folded Reload
+ movq %r9, 48(%r11)
+ adcq -8(%rsp), %r10 # 8-byte Folded Reload
+ movq %r10, 56(%r11)
+ adcq %r14, %rbx
+ movq %rbx, 64(%r11)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end139:
+ .size mcl_fp_subNF9L, .Lfunc_end139-mcl_fp_subNF9L
+
+ .globl mcl_fpDbl_add9L
+ .align 16, 0x90
+ .type mcl_fpDbl_add9L,@function
+mcl_fpDbl_add9L: # @mcl_fpDbl_add9L
+# BB#0:
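+ # double-width (18-limb) addition: the low 9 limbs of the sum are stored directly,
+ # the high 9 limbs are conditionally reduced by the modulus at %rcx before being stored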
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r15
+ movq 136(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 128(%rdx), %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq 120(%rdx), %r10
+ movq 112(%rdx), %r11
+ movq 24(%rsi), %rcx
+ movq 32(%rsi), %r14
+ movq 16(%rdx), %rbp
+ movq (%rdx), %rax
+ movq 8(%rdx), %rbx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %rbp
+ adcq 24(%rdx), %rcx
+ adcq 32(%rdx), %r14
+ movq 104(%rdx), %r9
+ movq 96(%rdx), %r13
+ movq %rax, (%rdi)
+ movq 88(%rdx), %r8
+ movq %rbx, 8(%rdi)
+ movq 80(%rdx), %r12
+ movq %rbp, 16(%rdi)
+ movq 40(%rdx), %rax
+ movq %rcx, 24(%rdi)
+ movq 40(%rsi), %rbp
+ adcq %rax, %rbp
+ movq 48(%rdx), %rcx
+ movq %r14, 32(%rdi)
+ movq 48(%rsi), %rax
+ adcq %rcx, %rax
+ movq 56(%rdx), %r14
+ movq %rbp, 40(%rdi)
+ movq 56(%rsi), %rbp
+ adcq %r14, %rbp
+ movq 72(%rdx), %rcx
+ movq 64(%rdx), %rdx
+ movq %rax, 48(%rdi)
+ movq 64(%rsi), %rax
+ adcq %rdx, %rax
+ movq 136(%rsi), %rbx
+ movq %rbp, 56(%rdi)
+ movq 72(%rsi), %rbp
+ adcq %rcx, %rbp
+ movq 128(%rsi), %rcx
+ movq %rax, 64(%rdi)
+ movq 80(%rsi), %rdx
+ adcq %r12, %rdx
+ movq 88(%rsi), %r12
+ adcq %r8, %r12
+ movq 96(%rsi), %r14
+ adcq %r13, %r14
+ movq %r14, -48(%rsp) # 8-byte Spill
+ movq 104(%rsi), %rax
+ adcq %r9, %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 120(%rsi), %rax
+ movq 112(%rsi), %rsi
+ adcq %r11, %rsi
+ movq %rsi, -24(%rsp) # 8-byte Spill
+ adcq %r10, %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ adcq -40(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -40(%rsp) # 8-byte Spill
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, -8(%rsp) # 8-byte Spill
+ sbbq %r9, %r9
+ andl $1, %r9d
+ movq %rbp, %r10
+ subq (%r15), %r10
+ movq %rdx, %r11
+ sbbq 8(%r15), %r11
+ movq %r12, %rbx
+ sbbq 16(%r15), %rbx
+ sbbq 24(%r15), %r14
+ movq -32(%rsp), %r13 # 8-byte Reload
+ sbbq 32(%r15), %r13
+ movq -24(%rsp), %rsi # 8-byte Reload
+ sbbq 40(%r15), %rsi
+ movq -16(%rsp), %rax # 8-byte Reload
+ sbbq 48(%r15), %rax
+ sbbq 56(%r15), %rcx
+ movq -8(%rsp), %r8 # 8-byte Reload
+ sbbq 64(%r15), %r8
+ sbbq $0, %r9
+ andl $1, %r9d
+ cmovneq %rbp, %r10
+ movq %r10, 72(%rdi)
+ testb %r9b, %r9b
+ cmovneq %rdx, %r11
+ movq %r11, 80(%rdi)
+ cmovneq %r12, %rbx
+ movq %rbx, 88(%rdi)
+ cmovneq -48(%rsp), %r14 # 8-byte Folded Reload
+ movq %r14, 96(%rdi)
+ cmovneq -32(%rsp), %r13 # 8-byte Folded Reload
+ movq %r13, 104(%rdi)
+ cmovneq -24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 112(%rdi)
+ cmovneq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 120(%rdi)
+ cmovneq -40(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 128(%rdi)
+ cmovneq -8(%rsp), %r8 # 8-byte Folded Reload
+ movq %r8, 136(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end140:
+ .size mcl_fpDbl_add9L, .Lfunc_end140-mcl_fpDbl_add9L
+
+ .globl mcl_fpDbl_sub9L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub9L,@function
+mcl_fpDbl_sub9L: # @mcl_fpDbl_sub9L
+# BB#0:
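+ # double-width (18-limb) subtraction: the low 9 limbs are stored directly; if the
+ # subtraction borrowed, the modulus at %rcx is added back into the high 9 limbs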
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r14
+ movq 136(%rdx), %rax
+ movq %rax, -8(%rsp) # 8-byte Spill
+ movq 128(%rdx), %rax
+ movq %rax, -16(%rsp) # 8-byte Spill
+ movq 120(%rdx), %rax
+ movq %rax, -24(%rsp) # 8-byte Spill
+ movq 16(%rsi), %r11
+ movq (%rsi), %r12
+ movq 8(%rsi), %r13
+ xorl %r9d, %r9d
+ subq (%rdx), %r12
+ sbbq 8(%rdx), %r13
+ sbbq 16(%rdx), %r11
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %rbp
+ sbbq 32(%rdx), %rbp
+ movq 112(%rdx), %r10
+ movq 104(%rdx), %rcx
+ movq %r12, (%rdi)
+ movq 96(%rdx), %rax
+ movq %r13, 8(%rdi)
+ movq 88(%rdx), %r13
+ movq %r11, 16(%rdi)
+ movq 40(%rdx), %r11
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 48(%rdx), %r11
+ movq %rbp, 32(%rdi)
+ movq 48(%rsi), %rbp
+ sbbq %r11, %rbp
+ movq 56(%rdx), %r11
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 64(%rdx), %r11
+ movq %rbp, 48(%rdi)
+ movq 64(%rsi), %rbp
+ sbbq %r11, %rbp
+ movq 80(%rdx), %r8
+ movq 72(%rdx), %r11
+ movq %rbx, 56(%rdi)
+ movq 72(%rsi), %r15
+ sbbq %r11, %r15
+ movq 136(%rsi), %rdx
+ movq %rbp, 64(%rdi)
+ movq 80(%rsi), %rbp
+ sbbq %r8, %rbp
+ movq 88(%rsi), %r12
+ sbbq %r13, %r12
+ movq 96(%rsi), %r13
+ sbbq %rax, %r13
+ movq 104(%rsi), %rax
+ sbbq %rcx, %rax
+ movq %rax, -40(%rsp) # 8-byte Spill
+ movq 112(%rsi), %rax
+ sbbq %r10, %rax
+ movq %rax, -32(%rsp) # 8-byte Spill
+ movq 128(%rsi), %rax
+ movq 120(%rsi), %rcx
+ sbbq -24(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, -24(%rsp) # 8-byte Spill
+ sbbq -16(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, -16(%rsp) # 8-byte Spill
+ sbbq -8(%rsp), %rdx # 8-byte Folded Reload
+ movq %rdx, -8(%rsp) # 8-byte Spill
+ movl $0, %r8d
+ sbbq $0, %r8
+ andl $1, %r8d
+ movq (%r14), %r10
+ cmoveq %r9, %r10
+ testb %r8b, %r8b
+ movq 16(%r14), %r8
+ cmoveq %r9, %r8
+ movq 8(%r14), %rdx
+ cmoveq %r9, %rdx
+ movq 64(%r14), %rbx
+ cmoveq %r9, %rbx
+ movq 56(%r14), %r11
+ cmoveq %r9, %r11
+ movq 48(%r14), %rsi
+ cmoveq %r9, %rsi
+ movq 40(%r14), %rcx
+ cmoveq %r9, %rcx
+ movq 32(%r14), %rax
+ cmoveq %r9, %rax
+ cmovneq 24(%r14), %r9
+ addq %r15, %r10
+ adcq %rbp, %rdx
+ movq %r10, 72(%rdi)
+ adcq %r12, %r8
+ movq %rdx, 80(%rdi)
+ adcq %r13, %r9
+ movq %r8, 88(%rdi)
+ movq %r9, 96(%rdi)
+ adcq -40(%rsp), %rax # 8-byte Folded Reload
+ movq %rax, 104(%rdi)
+ adcq -32(%rsp), %rcx # 8-byte Folded Reload
+ movq %rcx, 112(%rdi)
+ adcq -24(%rsp), %rsi # 8-byte Folded Reload
+ movq %rsi, 120(%rdi)
+ adcq -16(%rsp), %r11 # 8-byte Folded Reload
+ movq %r11, 128(%rdi)
+ adcq -8(%rsp), %rbx # 8-byte Folded Reload
+ movq %rbx, 136(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+.Lfunc_end141:
+ .size mcl_fpDbl_sub9L, .Lfunc_end141-mcl_fpDbl_sub9L
+
+
+ .section ".note.GNU-stack","",@progbits
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/x86-64mac.bmi2.s b/vendor/github.com/tangerine-network/mcl/src/asm/x86-64mac.bmi2.s
new file mode 100644
index 000000000..849c66649
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/x86-64mac.bmi2.s
@@ -0,0 +1,13830 @@
+ .section __TEXT,__text,regular,pure_instructions
+ .macosx_version_min 10, 12
+ .globl _makeNIST_P192Lbmi2
+ .p2align 4, 0x90
+_makeNIST_P192Lbmi2: ## @makeNIST_P192Lbmi2
+## BB#0:
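+ ## returns the NIST P-192 prime 2^192 - 2^64 - 1 as three limbs in %rax:%rdx:%rcx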
+ movq $-1, %rax
+ movq $-2, %rdx
+ movq $-1, %rcx
+ retq
+
+ .globl _mcl_fpDbl_mod_NIST_P192Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mod_NIST_P192Lbmi2: ## @mcl_fpDbl_mod_NIST_P192Lbmi2
+## BB#0:
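+ ## reduces the 384-bit value at %rsi modulo P-192 using 2^192 = 2^64 + 1 (mod p),
+ ## finishing with a conditional subtraction of p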
+ pushq %r14
+ pushq %rbx
+ movq 16(%rsi), %r10
+ movq 24(%rsi), %r8
+ movq 40(%rsi), %r9
+ movq 8(%rsi), %rax
+ addq %r9, %rax
+ adcq $0, %r10
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ movq 32(%rsi), %r11
+ movq (%rsi), %r14
+ addq %r8, %r14
+ adcq %r11, %rax
+ adcq %r9, %r10
+ adcq $0, %rcx
+ addq %r9, %r14
+ adcq %r8, %rax
+ adcq %r11, %r10
+ adcq $0, %rcx
+ addq %rcx, %r14
+ adcq %rax, %rcx
+ adcq $0, %r10
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r14, %rsi
+ addq $1, %rsi
+ movq %rcx, %rdx
+ adcq $1, %rdx
+ movq %r10, %rbx
+ adcq $0, %rbx
+ adcq $-1, %rax
+ andl $1, %eax
+ cmovneq %r14, %rsi
+ movq %rsi, (%rdi)
+ testb %al, %al
+ cmovneq %rcx, %rdx
+ movq %rdx, 8(%rdi)
+ cmovneq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fp_sqr_NIST_P192Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sqr_NIST_P192Lbmi2: ## @mcl_fp_sqr_NIST_P192Lbmi2
+## BB#0:
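+ ## squares the 192-bit value at %rsi with mulx and reduces the 384-bit product
+ ## modulo P-192 in line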
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r8
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ movq %r8, %rdx
+ mulxq %rsi, %r14, %rbx
+ movq %rbx, -16(%rsp) ## 8-byte Spill
+ movq %rsi, %rdx
+ mulxq %rsi, %r13, %r15
+ mulxq %rcx, %r12, %rsi
+ addq %rsi, %r13
+ adcq %r14, %r15
+ adcq $0, %rbx
+ movq %rcx, %rdx
+ mulxq %rcx, %r9, %rax
+ addq %r12, %rax
+ movq %r8, %rdx
+ mulxq %rcx, %rbp, %r11
+ adcq %rbp, %rsi
+ movq %r11, %r10
+ adcq $0, %r10
+ addq %r12, %rax
+ adcq %r13, %rsi
+ adcq %r15, %r10
+ adcq $0, %rbx
+ mulxq %r8, %rcx, %rdi
+ addq %r14, %r11
+ adcq -16(%rsp), %rcx ## 8-byte Folded Reload
+ adcq $0, %rdi
+ addq %rbp, %rsi
+ adcq %r10, %r11
+ adcq %rbx, %rcx
+ adcq $0, %rdi
+ addq %rdi, %rax
+ adcq $0, %rsi
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ addq %r11, %r9
+ adcq %rcx, %rax
+ adcq %rdi, %rsi
+ adcq $0, %rdx
+ addq %rdi, %r9
+ adcq %r11, %rax
+ adcq %rcx, %rsi
+ adcq $0, %rdx
+ addq %rdx, %r9
+ adcq %rax, %rdx
+ adcq $0, %rsi
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r9, %rcx
+ addq $1, %rcx
+ movq %rdx, %rdi
+ adcq $1, %rdi
+ movq %rsi, %rbp
+ adcq $0, %rbp
+ adcq $-1, %rax
+ andl $1, %eax
+ cmovneq %r9, %rcx
+ movq -8(%rsp), %rbx ## 8-byte Reload
+ movq %rcx, (%rbx)
+ testb %al, %al
+ cmovneq %rdx, %rdi
+ movq %rdi, 8(%rbx)
+ cmovneq %rsi, %rbp
+ movq %rbp, 16(%rbx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mulNIST_P192Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulNIST_P192Lbmi2: ## @mcl_fp_mulNIST_P192Lbmi2
+## BB#0:
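+ ## multiplies via _mcl_fpDbl_mulPre3Lbmi2 into a stack buffer, then folds the
+ ## 384-bit product modulo P-192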
+ pushq %r14
+ pushq %rbx
+ subq $56, %rsp
+ movq %rdi, %r14
+ leaq 8(%rsp), %rdi
+ callq _mcl_fpDbl_mulPre3Lbmi2
+ movq 24(%rsp), %r9
+ movq 32(%rsp), %r8
+ movq 48(%rsp), %rdi
+ movq 16(%rsp), %rbx
+ addq %rdi, %rbx
+ adcq $0, %r9
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ movq 40(%rsp), %rsi
+ movq 8(%rsp), %rdx
+ addq %r8, %rdx
+ adcq %rsi, %rbx
+ adcq %rdi, %r9
+ adcq $0, %rcx
+ addq %rdi, %rdx
+ adcq %r8, %rbx
+ adcq %rsi, %r9
+ adcq $0, %rcx
+ addq %rcx, %rdx
+ adcq %rbx, %rcx
+ adcq $0, %r9
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rdx, %rdi
+ addq $1, %rdi
+ movq %rcx, %rbx
+ adcq $1, %rbx
+ movq %r9, %rax
+ adcq $0, %rax
+ adcq $-1, %rsi
+ andl $1, %esi
+ cmovneq %rdx, %rdi
+ movq %rdi, (%r14)
+ testb %sil, %sil
+ cmovneq %rcx, %rbx
+ movq %rbx, 8(%r14)
+ cmovneq %r9, %rax
+ movq %rax, 16(%r14)
+ addq $56, %rsp
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fpDbl_mod_NIST_P521Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mod_NIST_P521Lbmi2: ## @mcl_fpDbl_mod_NIST_P521Lbmi2
+## BB#0:
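+ ## reduces the double-width value at %rsi modulo 2^521 - 1: the bits above bit 520 are
+ ## shifted down and added to the low 521 bits, the carry is folded once more, and the
+ ## output is zeroed when the folded result equals the prime itself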
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 120(%rsi), %r9
+ movq 128(%rsi), %r14
+ movq %r14, %r8
+ shldq $55, %r9, %r8
+ movq 112(%rsi), %r10
+ shldq $55, %r10, %r9
+ movq 104(%rsi), %r11
+ shldq $55, %r11, %r10
+ movq 96(%rsi), %r15
+ shldq $55, %r15, %r11
+ movq 88(%rsi), %r12
+ shldq $55, %r12, %r15
+ movq 80(%rsi), %rcx
+ shldq $55, %rcx, %r12
+ movq 64(%rsi), %rbx
+ movq 72(%rsi), %rax
+ shldq $55, %rax, %rcx
+ shrq $9, %r14
+ shldq $55, %rbx, %rax
+ ## kill: %EBX<def> %EBX<kill> %RBX<kill> %RBX<def>
+ andl $511, %ebx ## imm = 0x1FF
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r12
+ adcq 24(%rsi), %r15
+ adcq 32(%rsi), %r11
+ adcq 40(%rsi), %r10
+ adcq 48(%rsi), %r9
+ adcq 56(%rsi), %r8
+ adcq %r14, %rbx
+ movl %ebx, %esi
+ shrl $9, %esi
+ andl $1, %esi
+ addq %rax, %rsi
+ adcq $0, %rcx
+ adcq $0, %r12
+ adcq $0, %r15
+ adcq $0, %r11
+ adcq $0, %r10
+ adcq $0, %r9
+ adcq $0, %r8
+ adcq $0, %rbx
+ movq %rsi, %rax
+ andq %r12, %rax
+ andq %r15, %rax
+ andq %r11, %rax
+ andq %r10, %rax
+ andq %r9, %rax
+ andq %r8, %rax
+ movq %rbx, %rdx
+ orq $-512, %rdx ## imm = 0xFE00
+ andq %rax, %rdx
+ andq %rcx, %rdx
+ cmpq $-1, %rdx
+ je LBB4_1
+## BB#3: ## %nonzero
+ movq %rsi, (%rdi)
+ movq %rcx, 8(%rdi)
+ movq %r12, 16(%rdi)
+ movq %r15, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %r8, 56(%rdi)
+ andl $511, %ebx ## imm = 0x1FF
+ movq %rbx, 64(%rdi)
+ jmp LBB4_2
+LBB4_1: ## %zero
+ movq $0, 64(%rdi)
+ movq $0, 56(%rdi)
+ movq $0, 48(%rdi)
+ movq $0, 40(%rdi)
+ movq $0, 32(%rdi)
+ movq $0, 24(%rdi)
+ movq $0, 16(%rdi)
+ movq $0, 8(%rdi)
+ movq $0, (%rdi)
+LBB4_2: ## %zero
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_mulUnitPre1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre1Lbmi2: ## @mcl_fp_mulUnitPre1Lbmi2
+## BB#0:
+ mulxq (%rsi), %rcx, %rax
+ movq %rcx, (%rdi)
+ movq %rax, 8(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_mulPre1Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre1Lbmi2: ## @mcl_fpDbl_mulPre1Lbmi2
+## BB#0:
+ movq (%rdx), %rdx
+ mulxq (%rsi), %rcx, %rax
+ movq %rcx, (%rdi)
+ movq %rax, 8(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_sqrPre1Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre1Lbmi2: ## @mcl_fpDbl_sqrPre1Lbmi2
+## BB#0:
+ movq (%rsi), %rdx
+ mulxq %rdx, %rcx, %rax
+ movq %rcx, (%rdi)
+ movq %rax, 8(%rdi)
+ retq
+
+ .globl _mcl_fp_mont1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mont1Lbmi2: ## @mcl_fp_mont1Lbmi2
+## BB#0:
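+ ## one-limb Montgomery multiplication: t = x*y, m = t*n' mod 2^64, z = (t + m*p)/2^64
+ ## with a final conditional subtraction of p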
+ movq %rdx, %rax
+ movq (%rsi), %rdx
+ mulxq (%rax), %rsi, %r8
+ movq -8(%rcx), %rdx
+ imulq %rsi, %rdx
+ movq (%rcx), %rcx
+ mulxq %rcx, %rdx, %rax
+ addq %rsi, %rdx
+ adcq %r8, %rax
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ movq %rax, %rsi
+ subq %rcx, %rsi
+ sbbq $0, %rdx
+ testb $1, %dl
+ cmovneq %rax, %rsi
+ movq %rsi, (%rdi)
+ retq
+
+ .globl _mcl_fp_montNF1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montNF1Lbmi2: ## @mcl_fp_montNF1Lbmi2
+## BB#0:
+ movq %rdx, %rax
+ movq (%rsi), %rdx
+ mulxq (%rax), %rsi, %r8
+ movq -8(%rcx), %rdx
+ imulq %rsi, %rdx
+ movq (%rcx), %rcx
+ mulxq %rcx, %rdx, %rax
+ addq %rsi, %rdx
+ adcq %r8, %rax
+ movq %rax, %rdx
+ subq %rcx, %rdx
+ cmovsq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+
+ .globl _mcl_fp_montRed1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montRed1Lbmi2: ## @mcl_fp_montRed1Lbmi2
+## BB#0:
+ movq (%rsi), %rcx
+ movq -8(%rdx), %rax
+ imulq %rcx, %rax
+ movq (%rdx), %r8
+ movq %rax, %rdx
+ mulxq %r8, %rax, %rdx
+ addq %rcx, %rax
+ adcq 8(%rsi), %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rcx
+ subq %r8, %rcx
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rcx
+ movq %rcx, (%rdi)
+ retq
+
+ .globl _mcl_fp_addPre1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addPre1Lbmi2: ## @mcl_fp_addPre1Lbmi2
+## BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, (%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_subPre1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subPre1Lbmi2: ## @mcl_fp_subPre1Lbmi2
+## BB#0:
+ movq (%rsi), %rcx
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ movq %rcx, (%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_shr1_1Lbmi2: ## @mcl_fp_shr1_1Lbmi2
+## BB#0:
+ movq (%rsi), %rax
+ shrq %rax
+ movq %rax, (%rdi)
+ retq
+
+ .globl _mcl_fp_add1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_add1Lbmi2: ## @mcl_fp_add1Lbmi2
+## BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, (%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %rax
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne LBB14_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+LBB14_2: ## %carry
+ retq
+
+ .globl _mcl_fp_addNF1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addNF1Lbmi2: ## @mcl_fp_addNF1Lbmi2
+## BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, %rdx
+ subq (%rcx), %rdx
+ cmovsq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+
+ .globl _mcl_fp_sub1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sub1Lbmi2: ## @mcl_fp_sub1Lbmi2
+## BB#0:
+ movq (%rsi), %rax
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ movq %rax, (%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB16_2
+## BB#1: ## %nocarry
+ retq
+LBB16_2: ## %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ retq
+
+ .globl _mcl_fp_subNF1Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subNF1Lbmi2: ## @mcl_fp_subNF1Lbmi2
+## BB#0:
+ movq (%rsi), %rax
+ subq (%rdx), %rax
+ movq %rax, %rdx
+ sarq $63, %rdx
+ andq (%rcx), %rdx
+ addq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+
+ .globl _mcl_fpDbl_add1Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_add1Lbmi2: ## @mcl_fpDbl_add1Lbmi2
+## BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ movq %rax, (%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rsi
+ subq (%rcx), %rsi
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_sub1Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sub1Lbmi2: ## @mcl_fpDbl_sub1Lbmi2
+## BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %r8
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r8
+ movq %rax, (%rdi)
+ movl $0, %eax
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq (%rcx), %rsi
+ addq %r8, %rsi
+ movq %rsi, 8(%rdi)
+ retq
+
+ .globl _mcl_fp_mulUnitPre2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre2Lbmi2: ## @mcl_fp_mulUnitPre2Lbmi2
+## BB#0:
+ mulxq 8(%rsi), %rax, %rcx
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %rax, %rsi
+ movq %rsi, 8(%rdi)
+ adcq $0, %rcx
+ movq %rcx, 16(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_mulPre2Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre2Lbmi2: ## @mcl_fpDbl_mulPre2Lbmi2
+## BB#0:
+ movq %rdx, %r10
+ movq (%rsi), %r11
+ movq 8(%rsi), %r8
+ movq (%r10), %rsi
+ movq %r11, %rdx
+ mulxq %rsi, %rdx, %r9
+ movq %rdx, (%rdi)
+ movq %r8, %rdx
+ mulxq %rsi, %rsi, %rax
+ addq %r9, %rsi
+ adcq $0, %rax
+ movq 8(%r10), %rcx
+ movq %r11, %rdx
+ mulxq %rcx, %rdx, %r9
+ addq %rsi, %rdx
+ movq %rdx, 8(%rdi)
+ movq %r8, %rdx
+ mulxq %rcx, %rdx, %rcx
+ adcq %rax, %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r9, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %rcx, %rax
+ movq %rax, 24(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_sqrPre2Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre2Lbmi2: ## @mcl_fpDbl_sqrPre2Lbmi2
+## BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %rcx
+ movq %rax, %rdx
+ mulxq %rax, %rdx, %rsi
+ movq %rdx, (%rdi)
+ movq %rcx, %rdx
+ mulxq %rax, %rdx, %r8
+ addq %rdx, %rsi
+ movq %r8, %rax
+ adcq $0, %rax
+ addq %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %rcx
+ adcq %rax, %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r8, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %rcx, %rax
+ movq %rax, 24(%rdi)
+ retq
+
+ .globl _mcl_fp_mont2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mont2Lbmi2: ## @mcl_fp_mont2Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %r11
+ movq %r9, %rdx
+ mulxq %rax, %r10, %r13
+ movq %r8, %rdx
+ mulxq %rax, %r14, %rsi
+ addq %r10, %rsi
+ adcq $0, %r13
+ movq -8(%rcx), %rbp
+ movq (%rcx), %r10
+ movq %r14, %rdx
+ imulq %rbp, %rdx
+ movq 8(%rcx), %r15
+ mulxq %r15, %r12, %rcx
+ mulxq %r10, %rdx, %rbx
+ addq %r12, %rbx
+ adcq $0, %rcx
+ addq %r14, %rdx
+ adcq %rsi, %rbx
+ adcq %r13, %rcx
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %r11, %rdx
+ mulxq %r9, %r9, %r14
+ movq %r11, %rdx
+ mulxq %r8, %r8, %rax
+ addq %r9, %rax
+ adcq $0, %r14
+ addq %rbx, %r8
+ adcq %rcx, %rax
+ adcq %rsi, %r14
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ imulq %r8, %rbp
+ movq %rbp, %rdx
+ mulxq %r15, %rcx, %rbx
+ mulxq %r10, %rdx, %rbp
+ addq %rcx, %rbp
+ adcq $0, %rbx
+ addq %r8, %rdx
+ adcq %rax, %rbp
+ adcq %r14, %rbx
+ adcq $0, %rsi
+ movq %rbp, %rax
+ subq %r10, %rax
+ movq %rbx, %rcx
+ sbbq %r15, %rcx
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rbx, %rcx
+ testb %sil, %sil
+ cmovneq %rbp, %rax
+ movq %rax, (%rdi)
+ movq %rcx, 8(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montNF2Lbmi2: ## @mcl_fp_montNF2Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %r11
+ movq %r9, %rdx
+ mulxq %rax, %r10, %rsi
+ movq %r8, %rdx
+ mulxq %rax, %r15, %r13
+ addq %r10, %r13
+ adcq $0, %rsi
+ movq -8(%rcx), %rbp
+ movq (%rcx), %r10
+ movq %r15, %rdx
+ imulq %rbp, %rdx
+ movq 8(%rcx), %r14
+ mulxq %r10, %rcx, %r12
+ addq %r15, %rcx
+ mulxq %r14, %rbx, %rcx
+ adcq %r13, %rbx
+ adcq $0, %rsi
+ addq %r12, %rbx
+ adcq %rcx, %rsi
+ movq %r11, %rdx
+ mulxq %r9, %r9, %rcx
+ movq %r11, %rdx
+ mulxq %r8, %r8, %rax
+ addq %r9, %rax
+ adcq $0, %rcx
+ addq %rbx, %r8
+ adcq %rsi, %rax
+ adcq $0, %rcx
+ imulq %r8, %rbp
+ movq %rbp, %rdx
+ mulxq %r14, %rbx, %rsi
+ mulxq %r10, %rbp, %rdx
+ addq %r8, %rbp
+ adcq %rax, %rbx
+ adcq $0, %rcx
+ addq %rdx, %rbx
+ adcq %rsi, %rcx
+ movq %rbx, %rax
+ subq %r10, %rax
+ movq %rcx, %rdx
+ sbbq %r14, %rdx
+ cmovsq %rbx, %rax
+ movq %rax, (%rdi)
+ cmovsq %rcx, %rdx
+ movq %rdx, 8(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montRed2Lbmi2: ## @mcl_fp_montRed2Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq -8(%rdx), %r15
+ movq (%rdx), %r8
+ movq (%rsi), %r10
+ movq %r10, %rcx
+ imulq %r15, %rcx
+ movq 8(%rdx), %r9
+ movq %rcx, %rdx
+ mulxq %r9, %r11, %r14
+ mulxq %r8, %rcx, %rax
+ addq %r11, %rax
+ adcq $0, %r14
+ movq 24(%rsi), %r11
+ addq %r10, %rcx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r14
+ adcq $0, %r11
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ imulq %rax, %r15
+ movq %r15, %rdx
+ mulxq %r9, %r10, %rbx
+ mulxq %r8, %rsi, %rdx
+ addq %r10, %rdx
+ adcq $0, %rbx
+ addq %rax, %rsi
+ adcq %r14, %rdx
+ adcq %r11, %rbx
+ adcq $0, %rcx
+ movq %rdx, %rax
+ subq %r8, %rax
+ movq %rbx, %rsi
+ sbbq %r9, %rsi
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rbx, %rsi
+ testb %cl, %cl
+ cmovneq %rdx, %rax
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_addPre2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addPre2Lbmi2: ## @mcl_fp_addPre2Lbmi2
+## BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rcx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rcx
+ movq %rax, (%rdi)
+ movq %rcx, 8(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_subPre2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subPre2Lbmi2: ## @mcl_fp_subPre2Lbmi2
+## BB#0:
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_shr1_2Lbmi2: ## @mcl_fp_shr1_2Lbmi2
+## BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %rcx
+ shrdq $1, %rcx, %rax
+ movq %rax, (%rdi)
+ shrq %rcx
+ movq %rcx, 8(%rdi)
+ retq
+
+ .globl _mcl_fp_add2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_add2Lbmi2: ## @mcl_fp_add2Lbmi2
+## BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB29_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+LBB29_2: ## %carry
+ retq
+
+ .globl _mcl_fp_addNF2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addNF2Lbmi2: ## @mcl_fp_addNF2Lbmi2
+## BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %r8
+ addq (%rsi), %rax
+ adcq 8(%rsi), %r8
+ movq %rax, %rsi
+ subq (%rcx), %rsi
+ movq %r8, %rdx
+ sbbq 8(%rcx), %rdx
+ testq %rdx, %rdx
+ cmovsq %rax, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r8, %rdx
+ movq %rdx, 8(%rdi)
+ retq
+
+ .globl _mcl_fp_sub2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sub2Lbmi2: ## @mcl_fp_sub2Lbmi2
+## BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %r8
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r8
+ movq %rax, (%rdi)
+ movq %r8, 8(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB31_2
+## BB#1: ## %nocarry
+ retq
+LBB31_2: ## %carry
+ movq 8(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r8, %rdx
+ movq %rdx, 8(%rdi)
+ retq
+
+ .globl _mcl_fp_subNF2Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subNF2Lbmi2: ## @mcl_fp_subNF2Lbmi2
+## BB#0:
+ movq (%rsi), %r8
+ movq 8(%rsi), %rsi
+ subq (%rdx), %r8
+ sbbq 8(%rdx), %rsi
+ movq %rsi, %rdx
+ sarq $63, %rdx
+ movq 8(%rcx), %rax
+ andq %rdx, %rax
+ andq (%rcx), %rdx
+ addq %r8, %rdx
+ movq %rdx, (%rdi)
+ adcq %rsi, %rax
+ movq %rax, 8(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_add2Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_add2Lbmi2: ## @mcl_fpDbl_add2Lbmi2
+## BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rdx), %r10
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r10
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ adcq %r8, %r9
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r10, %rdx
+ subq (%rcx), %rdx
+ movq %r9, %rsi
+ sbbq 8(%rcx), %rsi
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %r10, %rdx
+ movq %rdx, 16(%rdi)
+ testb %al, %al
+ cmovneq %r9, %rsi
+ movq %rsi, 24(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_sub2Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sub2Lbmi2: ## @mcl_fpDbl_sub2Lbmi2
+## BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %r11
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %r11
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %r11, (%rdi)
+ movq %rsi, 8(%rdi)
+ sbbq %r8, %r9
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ cmovneq 8(%rcx), %rax
+ addq %r10, %rsi
+ movq %rsi, 16(%rdi)
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ retq
+
+ .globl _mcl_fp_mulUnitPre3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre3Lbmi2: ## @mcl_fp_mulUnitPre3Lbmi2
+## BB#0:
+ mulxq 16(%rsi), %r8, %rcx
+ mulxq 8(%rsi), %r9, %rax
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %r9, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r8, %rax
+ movq %rax, 16(%rdi)
+ adcq $0, %rcx
+ movq %rcx, 24(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_mulPre3Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre3Lbmi2: ## @mcl_fpDbl_mulPre3Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq %rdx, %r9
+ movq (%rsi), %r10
+ movq 8(%rsi), %r8
+ movq (%r9), %rax
+ movq %r10, %rdx
+ mulxq %rax, %rdx, %r14
+ movq 16(%rsi), %r11
+ movq %rdx, (%rdi)
+ movq %r11, %rdx
+ mulxq %rax, %rsi, %rbx
+ movq %r8, %rdx
+ mulxq %rax, %rax, %rcx
+ addq %r14, %rax
+ adcq %rsi, %rcx
+ adcq $0, %rbx
+ movq 8(%r9), %rsi
+ movq %r10, %rdx
+ mulxq %rsi, %rdx, %r14
+ addq %rax, %rdx
+ movq %rdx, 8(%rdi)
+ movq %r11, %rdx
+ mulxq %rsi, %rax, %r15
+ movq %r8, %rdx
+ mulxq %rsi, %rsi, %rdx
+ adcq %rcx, %rsi
+ adcq %rbx, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r14, %rsi
+ adcq %rdx, %rax
+ adcq %r15, %rcx
+ movq 16(%r9), %rbx
+ movq %r10, %rdx
+ mulxq %rbx, %rdx, %r9
+ addq %rsi, %rdx
+ movq %rdx, 16(%rdi)
+ movq %r11, %rdx
+ mulxq %rbx, %rsi, %r10
+ movq %r8, %rdx
+ mulxq %rbx, %rbx, %rdx
+ adcq %rax, %rbx
+ adcq %rcx, %rsi
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r9, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %rdx, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r10, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fpDbl_sqrPre3Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre3Lbmi2: ## @mcl_fpDbl_sqrPre3Lbmi2
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 16(%rsi), %r10
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %rax
+ movq %rdx, (%rdi)
+ movq %r10, %rdx
+ mulxq %rcx, %r11, %r8
+ movq %rsi, %rdx
+ mulxq %rcx, %rdx, %r14
+ addq %rdx, %rax
+ movq %r14, %rbx
+ adcq %r11, %rbx
+ movq %r8, %rcx
+ adcq $0, %rcx
+ addq %rdx, %rax
+ movq %rax, 8(%rdi)
+ movq %r10, %rdx
+ mulxq %rsi, %rax, %r9
+ movq %rsi, %rdx
+ mulxq %rsi, %rsi, %rdx
+ adcq %rbx, %rsi
+ adcq %rax, %rcx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq %r14, %rsi
+ adcq %rdx, %rcx
+ adcq %r9, %rbx
+ addq %r11, %rsi
+ movq %rsi, 16(%rdi)
+ movq %r10, %rdx
+ mulxq %r10, %rsi, %rdx
+ adcq %rax, %rcx
+ adcq %rbx, %rsi
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r8, %rcx
+ movq %rcx, 24(%rdi)
+ adcq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %rdx, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+
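+## 3-limb Montgomery multiplication, word-serial: for each multiplier word, a mulx
+## multiply-accumulate pass is followed by a reduction pass driven by the constant
+## loaded from -8 off the modulus pointer, with a final conditional subtraction.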
+ .globl _mcl_fp_mont3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mont3Lbmi2: ## @mcl_fp_mont3Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r14
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r12
+ movq (%r14), %rax
+ movq %r14, -16(%rsp) ## 8-byte Spill
+ movq %r12, %rdx
+ movq %r12, -24(%rsp) ## 8-byte Spill
+ mulxq %rax, %r11, %rbp
+ movq (%rsi), %r15
+ movq 8(%rsi), %rdx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ mulxq %rax, %rbx, %r8
+ movq %r15, %rdx
+ movq %r15, -32(%rsp) ## 8-byte Spill
+ mulxq %rax, %r9, %rdi
+ addq %rbx, %rdi
+ adcq %r11, %r8
+ adcq $0, %rbp
+ movq -8(%rcx), %r13
+ movq %r9, %rdx
+ imulq %r13, %rdx
+ movq 8(%rcx), %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulxq %rax, %r11, %r10
+ movq (%rcx), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulxq %rax, %rsi, %rbx
+ addq %r11, %rbx
+ movq 16(%rcx), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ mulxq %rax, %rcx, %rax
+ adcq %r10, %rcx
+ adcq $0, %rax
+ addq %r9, %rsi
+ adcq %rdi, %rbx
+ movq 8(%r14), %rdx
+ adcq %r8, %rcx
+ adcq %rbp, %rax
+ sbbq %r9, %r9
+ andl $1, %r9d
+ mulxq %r12, %r11, %rdi
+ movq -48(%rsp), %r12 ## 8-byte Reload
+ mulxq %r12, %r10, %rsi
+ mulxq %r15, %r8, %rbp
+ addq %r10, %rbp
+ adcq %r11, %rsi
+ adcq $0, %rdi
+ addq %rbx, %r8
+ adcq %rcx, %rbp
+ adcq %rax, %rsi
+ adcq %r9, %rdi
+ sbbq %r11, %r11
+ andl $1, %r11d
+ movq %r8, %rdx
+ imulq %r13, %rdx
+ movq -40(%rsp), %r14 ## 8-byte Reload
+ mulxq %r14, %r9, %rcx
+ mulxq -56(%rsp), %r10, %rax ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rdx, %rbx ## 8-byte Folded Reload
+ addq %r10, %rbx
+ adcq %r9, %rax
+ adcq $0, %rcx
+ addq %r8, %rdx
+ adcq %rbp, %rbx
+ adcq %rsi, %rax
+ adcq %rdi, %rcx
+ adcq $0, %r11
+ movq -16(%rsp), %rdx ## 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq -24(%rsp), %r9, %rsi ## 8-byte Folded Reload
+ mulxq %r12, %r10, %r15
+ mulxq -32(%rsp), %r8, %rdi ## 8-byte Folded Reload
+ addq %r10, %rdi
+ adcq %r9, %r15
+ adcq $0, %rsi
+ addq %rbx, %r8
+ adcq %rax, %rdi
+ adcq %rcx, %r15
+ adcq %r11, %rsi
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ imulq %r8, %r13
+ movq %r13, %rdx
+ mulxq %r14, %r9, %rbp
+ movq %r14, %r12
+ movq -56(%rsp), %r14 ## 8-byte Reload
+ mulxq %r14, %r10, %rax
+ movq -64(%rsp), %rcx ## 8-byte Reload
+ mulxq %rcx, %r11, %rdx
+ addq %r10, %rdx
+ adcq %r9, %rax
+ adcq $0, %rbp
+ addq %r8, %r11
+ adcq %rdi, %rdx
+ adcq %r15, %rax
+ adcq %rsi, %rbp
+ adcq $0, %rbx
+ movq %rdx, %rsi
+ subq %rcx, %rsi
+ movq %rax, %rdi
+ sbbq %r14, %rdi
+ movq %rbp, %rcx
+ sbbq %r12, %rcx
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %rbp, %rcx
+ testb %bl, %bl
+ cmovneq %rdx, %rsi
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ movq %rsi, (%rdx)
+ cmovneq %rax, %rdi
+ movq %rdi, 8(%rdx)
+ movq %rcx, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
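+## Montgomery multiplication, NF variant: same word-serial structure as mont3L, but
+## the final correction selects between the sum and sum-minus-modulus with cmovs on
+## the sign instead of tracking a separate carry word.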
+ .globl _mcl_fp_montNF3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montNF3Lbmi2: ## @mcl_fp_montNF3Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq %rdx, %r10
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rdi
+ movq %rdi, -32(%rsp) ## 8-byte Spill
+ movq (%r10), %rax
+ movq %r10, -16(%rsp) ## 8-byte Spill
+ movq %rdi, %rdx
+ mulxq %rax, %rbx, %r14
+ movq %rcx, %rdx
+ movq %rcx, -24(%rsp) ## 8-byte Spill
+ mulxq %rax, %r15, %r12
+ movq 16(%rsi), %r11
+ addq %rbx, %r12
+ movq %r11, %rdx
+ mulxq %rax, %rsi, %rbx
+ adcq %r14, %rsi
+ adcq $0, %rbx
+ movq -8(%r8), %r9
+ movq (%r8), %r14
+ movq %r15, %rdx
+ imulq %r9, %rdx
+ mulxq %r14, %rbp, %r13
+ addq %r15, %rbp
+ movq 8(%r8), %r15
+ mulxq %r15, %rdi, %rbp
+ adcq %r12, %rdi
+ movq 16(%r8), %r12
+ mulxq %r12, %rax, %r8
+ adcq %rsi, %rax
+ adcq $0, %rbx
+ addq %r13, %rdi
+ movq 8(%r10), %rdx
+ adcq %rbp, %rax
+ adcq %r8, %rbx
+ movq -32(%rsp), %r10 ## 8-byte Reload
+ mulxq %r10, %rsi, %r8
+ mulxq %rcx, %r13, %rbp
+ addq %rsi, %rbp
+ mulxq %r11, %rcx, %rsi
+ adcq %r8, %rcx
+ adcq $0, %rsi
+ addq %rdi, %r13
+ adcq %rax, %rbp
+ adcq %rbx, %rcx
+ adcq $0, %rsi
+ movq %r13, %rdx
+ imulq %r9, %rdx
+ mulxq %r14, %rdi, %rbx
+ addq %r13, %rdi
+ mulxq %r15, %rax, %rdi
+ adcq %rbp, %rax
+ mulxq %r12, %rbp, %rdx
+ adcq %rcx, %rbp
+ adcq $0, %rsi
+ addq %rbx, %rax
+ adcq %rdi, %rbp
+ adcq %rdx, %rsi
+ movq -16(%rsp), %rcx ## 8-byte Reload
+ movq 16(%rcx), %rdx
+ mulxq %r10, %rbx, %r8
+ mulxq -24(%rsp), %r10, %rdi ## 8-byte Folded Reload
+ addq %rbx, %rdi
+ mulxq %r11, %rcx, %rbx
+ adcq %r8, %rcx
+ adcq $0, %rbx
+ addq %rax, %r10
+ adcq %rbp, %rdi
+ adcq %rsi, %rcx
+ adcq $0, %rbx
+ imulq %r10, %r9
+ movq %r9, %rdx
+ mulxq %r14, %rdx, %r8
+ addq %r10, %rdx
+ movq %r9, %rdx
+ mulxq %r12, %rbp, %rsi
+ mulxq %r15, %rax, %rdx
+ adcq %rdi, %rax
+ adcq %rcx, %rbp
+ adcq $0, %rbx
+ addq %r8, %rax
+ adcq %rdx, %rbp
+ adcq %rsi, %rbx
+ movq %rax, %rcx
+ subq %r14, %rcx
+ movq %rbp, %rdx
+ sbbq %r15, %rdx
+ movq %rbx, %rsi
+ sbbq %r12, %rsi
+ movq %rsi, %rdi
+ sarq $63, %rdi
+ cmovsq %rax, %rcx
+ movq -8(%rsp), %rax ## 8-byte Reload
+ movq %rcx, (%rax)
+ cmovsq %rbp, %rdx
+ movq %rdx, 8(%rax)
+ cmovsq %rbx, %rsi
+ movq %rsi, 16(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
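+## Montgomery reduction: fold the 6-limb value at (%rsi) down to 3 limbs modulo the
+## modulus at (%rdx), one reduction word per pass, then conditionally subtract the
+## modulus from the result.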
+ .globl _mcl_fp_montRed3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montRed3Lbmi2: ## @mcl_fp_montRed3Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %r15
+ movq (%rcx), %r9
+ movq (%rsi), %rbx
+ movq %rbx, %rdx
+ imulq %r15, %rdx
+ movq 16(%rcx), %rax
+ mulxq %rax, %r14, %r11
+ movq %rax, %rbp
+ movq %rbp, -16(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %r10
+ mulxq %r10, %rax, %r13
+ mulxq %r9, %rdx, %rcx
+ addq %rax, %rcx
+ adcq %r14, %r13
+ adcq $0, %r11
+ movq 40(%rsi), %r14
+ movq 32(%rsi), %r12
+ addq %rbx, %rdx
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r13
+ adcq 24(%rsi), %r11
+ adcq $0, %r12
+ adcq $0, %r14
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rcx, %rdx
+ imulq %r15, %rdx
+ mulxq %rbp, %rbp, %rdi
+ mulxq %r10, %r8, %rbx
+ mulxq %r9, %rdx, %rax
+ addq %r8, %rax
+ adcq %rbp, %rbx
+ adcq $0, %rdi
+ addq %rcx, %rdx
+ adcq %r13, %rax
+ adcq %r11, %rbx
+ adcq %r12, %rdi
+ adcq $0, %r14
+ adcq $0, %rsi
+ imulq %rax, %r15
+ movq %r15, %rdx
+ movq -16(%rsp), %r13 ## 8-byte Reload
+ mulxq %r13, %r8, %rcx
+ movq %r15, %rdx
+ mulxq %r10, %r11, %r12
+ mulxq %r9, %r15, %rdx
+ addq %r11, %rdx
+ adcq %r8, %r12
+ adcq $0, %rcx
+ addq %rax, %r15
+ adcq %rbx, %rdx
+ adcq %rdi, %r12
+ adcq %r14, %rcx
+ adcq $0, %rsi
+ movq %rdx, %rax
+ subq %r9, %rax
+ movq %r12, %rdi
+ sbbq %r10, %rdi
+ movq %rcx, %rbp
+ sbbq %r13, %rbp
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rcx, %rbp
+ testb %sil, %sil
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rcx ## 8-byte Reload
+ movq %rax, (%rcx)
+ cmovneq %r12, %rdi
+ movq %rdi, 8(%rcx)
+ movq %rbp, 16(%rcx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
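+## addPre/subPre: plain 3-limb add/subtract with no reduction; the carry (or borrow)
+## out is returned in %rax.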
+ .globl _mcl_fp_addPre3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addPre3Lbmi2: ## @mcl_fp_addPre3Lbmi2
+## BB#0:
+ movq 16(%rdx), %rax
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rax
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_subPre3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subPre3Lbmi2: ## @mcl_fp_subPre3Lbmi2
+## BB#0:
+ movq 16(%rsi), %r8
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r8
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
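+## Halve a 3-limb value: shift right by one bit across the limbs with shrd.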
+ .globl _mcl_fp_shr1_3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_shr1_3Lbmi2: ## @mcl_fp_shr1_3Lbmi2
+## BB#0:
+ movq 16(%rsi), %rax
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rdx
+ shrdq $1, %rdx, %rcx
+ movq %rcx, (%rdi)
+ shrdq $1, %rax, %rdx
+ movq %rdx, 8(%rdi)
+ shrq %rax
+ movq %rax, 16(%rdi)
+ retq
+
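+## Modular addition: add, store the raw sum, trial-subtract the modulus, and overwrite
+## with the reduced result only when the subtraction does not borrow (nocarry path).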
+ .globl _mcl_fp_add3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_add3Lbmi2: ## @mcl_fp_add3Lbmi2
+## BB#0:
+ movq 16(%rdx), %r8
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r8
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB44_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r8, 16(%rdi)
+LBB44_2: ## %carry
+ retq
+
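+## Branch-free modular addition: trial-subtract the modulus and use cmovs to keep the
+## unreduced sum when the subtraction goes negative.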
+ .globl _mcl_fp_addNF3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addNF3Lbmi2: ## @mcl_fp_addNF3Lbmi2
+## BB#0:
+ movq 16(%rdx), %r8
+ movq (%rdx), %r10
+ movq 8(%rdx), %r9
+ addq (%rsi), %r10
+ adcq 8(%rsi), %r9
+ adcq 16(%rsi), %r8
+ movq %r10, %rsi
+ subq (%rcx), %rsi
+ movq %r9, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r8, %rax
+ sbbq 16(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r10, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 16(%rdi)
+ retq
+
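+## Modular subtraction: on borrow, the carry path adds the modulus back; the NF
+## variant below applies the same correction branch-free with a sign mask.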
+ .globl _mcl_fp_sub3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sub3Lbmi2: ## @mcl_fp_sub3Lbmi2
+## BB#0:
+ movq 16(%rsi), %r8
+ movq (%rsi), %rax
+ movq 8(%rsi), %r9
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r9
+ sbbq 16(%rdx), %r8
+ movq %rax, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB46_2
+## BB#1: ## %nocarry
+ retq
+LBB46_2: ## %carry
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rsi
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r8, %rsi
+ movq %rsi, 16(%rdi)
+ retq
+
+ .globl _mcl_fp_subNF3Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subNF3Lbmi2: ## @mcl_fp_subNF3Lbmi2
+## BB#0:
+ movq 16(%rsi), %r10
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ subq (%rdx), %r8
+ sbbq 8(%rdx), %r9
+ sbbq 16(%rdx), %r10
+ movq %r10, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rsi
+ shldq $1, %r10, %rsi
+ andq (%rcx), %rsi
+ movq 16(%rcx), %rax
+ andq %rdx, %rax
+ andq 8(%rcx), %rdx
+ addq %r8, %rsi
+ movq %rsi, (%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r10, %rax
+ movq %rax, 16(%rdi)
+ retq
+
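+## Double-width (6-limb) addition; the upper 3 limbs are conditionally reduced by the
+## 3-limb modulus. The matching fpDbl_sub3 below adds the modulus back on borrow.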
+ .globl _mcl_fpDbl_add3Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_add3Lbmi2: ## @mcl_fpDbl_add3Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r10
+ movq 40(%rsi), %r8
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r9
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r14, %r15
+ adcq %r11, %r9
+ adcq %r10, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r15, %rdx
+ subq (%rcx), %rdx
+ movq %r9, %rsi
+ sbbq 8(%rcx), %rsi
+ movq %r8, %rbx
+ sbbq 16(%rcx), %rbx
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %r15, %rdx
+ movq %rdx, 24(%rdi)
+ testb %al, %al
+ cmovneq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ cmovneq %r8, %rbx
+ movq %rbx, 40(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fpDbl_sub3Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sub3Lbmi2: ## @mcl_fpDbl_sub3Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r10
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rax
+ xorl %esi, %esi
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rax
+ movq 24(%rdx), %r15
+ movq 32(%rdx), %r12
+ sbbq 16(%rdx), %r14
+ movq %rbx, (%rdi)
+ movq %rax, 8(%rdi)
+ movq %r14, 16(%rdi)
+ sbbq %r15, %r11
+ sbbq %r12, %r9
+ sbbq %r10, %r8
+ movl $0, %eax
+ sbbq $0, %rax
+ andl $1, %eax
+ movq (%rcx), %rdx
+ cmoveq %rsi, %rdx
+ testb %al, %al
+ movq 16(%rcx), %rax
+ cmoveq %rsi, %rax
+ cmovneq 8(%rcx), %rsi
+ addq %r11, %rdx
+ movq %rdx, 24(%rdi)
+ adcq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r8, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
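+## The 4- and 5-limb routines that follow repeat the same patterns as the 3-limb
+## versions above, unrolled for the larger limb counts.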
+ .globl _mcl_fp_mulUnitPre4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre4Lbmi2: ## @mcl_fp_mulUnitPre4Lbmi2
+## BB#0:
+ mulxq 24(%rsi), %r8, %r11
+ mulxq 16(%rsi), %r9, %rax
+ mulxq 8(%rsi), %r10, %rcx
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %r10, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r9, %rcx
+ movq %rcx, 16(%rdi)
+ adcq %r8, %rax
+ movq %rax, 24(%rdi)
+ adcq $0, %r11
+ movq %r11, 32(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_mulPre4Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre4Lbmi2: ## @mcl_fpDbl_mulPre4Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq (%rsi), %r14
+ movq 8(%rsi), %r10
+ movq (%rdx), %rcx
+ movq %rdx, %rbp
+ movq %r14, %rdx
+ mulxq %rcx, %rdx, %r15
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r9
+ movq %rdx, (%rdi)
+ movq %r10, %rdx
+ mulxq %rcx, %rbx, %r12
+ addq %r15, %rbx
+ movq %r9, %rdx
+ mulxq %rcx, %r13, %r15
+ adcq %r12, %r13
+ movq %r11, %rdx
+ mulxq %rcx, %rcx, %r12
+ adcq %r15, %rcx
+ adcq $0, %r12
+ movq 8(%rbp), %rax
+ movq %r14, %rdx
+ mulxq %rax, %r8, %rdx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ addq %rbx, %r8
+ movq %r10, %rdx
+ mulxq %rax, %r15, %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ adcq %r13, %r15
+ movq %r9, %rdx
+ mulxq %rax, %rbx, %r13
+ adcq %rcx, %rbx
+ movq %r11, %rdx
+ mulxq %rax, %rcx, %rax
+ adcq %r12, %rcx
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -8(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -16(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %r13, %rcx
+ movq %r8, 8(%rdi)
+ adcq %rax, %r12
+ movq %rbp, %r13
+ movq 16(%r13), %rax
+ movq %r14, %rdx
+ mulxq %rax, %rdx, %r8
+ addq %r15, %rdx
+ movq %rdx, 16(%rdi)
+ movq %r10, %rdx
+ mulxq %rax, %rbp, %r10
+ adcq %rbx, %rbp
+ movq %r11, %rdx
+ mulxq %rax, %r14, %r11
+ movq %r9, %rdx
+ mulxq %rax, %r15, %rdx
+ adcq %rcx, %r15
+ adcq %r12, %r14
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r8, %rbp
+ adcq %r10, %r15
+ adcq %rdx, %r14
+ adcq %r11, %rcx
+ movq 24(%r13), %rdx
+ mulxq 24(%rsi), %rbx, %r8
+ mulxq (%rsi), %rax, %r9
+ addq %rbp, %rax
+ movq %rax, 24(%rdi)
+ mulxq 16(%rsi), %rbp, %rax
+ mulxq 8(%rsi), %rsi, %rdx
+ adcq %r15, %rsi
+ adcq %r14, %rbp
+ adcq %rcx, %rbx
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %rdx, %rbp
+ movq %rbp, 40(%rdi)
+ adcq %rax, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r8, %rcx
+ movq %rcx, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sqrPre4Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre4Lbmi2: ## @mcl_fpDbl_sqrPre4Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 24(%rsi), %r8
+ movq 16(%rsi), %r9
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rax
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %r11
+ movq %rdx, (%rdi)
+ movq %r9, %rdx
+ mulxq %rcx, %rbp, %r10
+ movq %rbp, -16(%rsp) ## 8-byte Spill
+ movq %r10, -8(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ mulxq %rcx, %r12, %r15
+ addq %r12, %r11
+ movq %r15, %rbx
+ adcq %rbp, %rbx
+ movq %r8, %rdx
+ mulxq %rcx, %rcx, %r13
+ adcq %r10, %rcx
+ adcq $0, %r13
+ addq %r12, %r11
+ movq %rax, %rdx
+ mulxq %rax, %rbp, %r12
+ adcq %rbx, %rbp
+ movq %r8, %rdx
+ mulxq %rax, %r10, %rbx
+ movq %r9, %rdx
+ mulxq %rax, %r14, %rdx
+ adcq %r14, %rcx
+ adcq %r13, %r10
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r15, %rbp
+ adcq %r12, %rcx
+ adcq %rdx, %r10
+ movq %rdx, %r12
+ adcq %rbx, %rax
+ movq %r11, 8(%rdi)
+ addq -16(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 16(%rdi)
+ movq %r8, %rdx
+ mulxq %r9, %r11, %r8
+ movq %r9, %rdx
+ mulxq %r9, %r15, %rdx
+ adcq %r14, %rcx
+ adcq %r10, %r15
+ adcq %rax, %r11
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq -8(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r12, %r15
+ adcq %rdx, %r11
+ adcq %r8, %rax
+ movq 24(%rsi), %rdx
+ mulxq 16(%rsi), %rbx, %r8
+ mulxq 8(%rsi), %rbp, %r9
+ mulxq (%rsi), %rsi, %r10
+ addq %rcx, %rsi
+ movq %rsi, 24(%rdi)
+ adcq %r15, %rbp
+ adcq %r11, %rbx
+ mulxq %rdx, %rdx, %rcx
+ adcq %rax, %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %r10, %rbp
+ movq %rbp, 32(%rdi)
+ adcq %r9, %rbx
+ movq %rbx, 40(%rdi)
+ adcq %r8, %rdx
+ movq %rdx, 48(%rdi)
+ adcq %rcx, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mont4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mont4Lbmi2: ## @mcl_fp_mont4Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r13
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 24(%rsi), %rdi
+ movq %rdi, -32(%rsp) ## 8-byte Spill
+ movq (%r13), %rax
+ movq %r13, -16(%rsp) ## 8-byte Spill
+ movq %rdi, %rdx
+ mulxq %rax, %rdi, %r11
+ movq 16(%rsi), %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ mulxq %rax, %rbx, %r10
+ movq (%rsi), %rbp
+ movq %rbp, -48(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ mulxq %rax, %rsi, %r12
+ movq %rbp, %rdx
+ mulxq %rax, %r14, %r8
+ addq %rsi, %r8
+ adcq %rbx, %r12
+ adcq %rdi, %r10
+ adcq $0, %r11
+ movq -8(%rcx), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %r14, %rdx
+ imulq %rax, %rdx
+ movq 24(%rcx), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulxq %rax, %r15, %rax
+ movq 16(%rcx), %rsi
+ movq %rsi, -80(%rsp) ## 8-byte Spill
+ mulxq %rsi, %r9, %rsi
+ movq (%rcx), %rbp
+ movq %rbp, -24(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -72(%rsp) ## 8-byte Spill
+ mulxq %rcx, %rdi, %rcx
+ mulxq %rbp, %rdx, %rbx
+ addq %rdi, %rbx
+ adcq %r9, %rcx
+ adcq %r15, %rsi
+ adcq $0, %rax
+ addq %r14, %rdx
+ adcq %r8, %rbx
+ adcq %r12, %rcx
+ adcq %r10, %rsi
+ adcq %r11, %rax
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ movq 8(%r13), %rdx
+ mulxq -32(%rsp), %r12, %r10 ## 8-byte Folded Reload
+ mulxq -40(%rsp), %r15, %r11 ## 8-byte Folded Reload
+ mulxq -56(%rsp), %r14, %rbp ## 8-byte Folded Reload
+ mulxq -48(%rsp), %r8, %r9 ## 8-byte Folded Reload
+ addq %r14, %r9
+ adcq %r15, %rbp
+ adcq %r12, %r11
+ adcq $0, %r10
+ addq %rbx, %r8
+ adcq %rcx, %r9
+ adcq %rsi, %rbp
+ adcq %rax, %r11
+ adcq %rdi, %r10
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ movq %r8, %rdx
+ imulq -88(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -64(%rsp), %r14, %rcx ## 8-byte Folded Reload
+ mulxq -80(%rsp), %r15, %rsi ## 8-byte Folded Reload
+ mulxq -72(%rsp), %r12, %rax ## 8-byte Folded Reload
+ movq -24(%rsp), %r13 ## 8-byte Reload
+ mulxq %r13, %rdx, %rdi
+ addq %r12, %rdi
+ adcq %r15, %rax
+ adcq %r14, %rsi
+ adcq $0, %rcx
+ addq %r8, %rdx
+ adcq %r9, %rdi
+ adcq %rbp, %rax
+ adcq %r11, %rsi
+ adcq %r10, %rcx
+ adcq $0, %rbx
+ movq -16(%rsp), %rdx ## 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq -32(%rsp), %r14, %r11 ## 8-byte Folded Reload
+ mulxq -40(%rsp), %r15, %rbp ## 8-byte Folded Reload
+ mulxq -56(%rsp), %r12, %r8 ## 8-byte Folded Reload
+ mulxq -48(%rsp), %r9, %r10 ## 8-byte Folded Reload
+ addq %r12, %r10
+ adcq %r15, %r8
+ adcq %r14, %rbp
+ adcq $0, %r11
+ addq %rdi, %r9
+ adcq %rax, %r10
+ adcq %rsi, %r8
+ adcq %rcx, %rbp
+ adcq %rbx, %r11
+ sbbq %rax, %rax
+ movq %r9, %rdx
+ imulq -88(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -72(%rsp), %rcx, %rsi ## 8-byte Folded Reload
+ mulxq %r13, %r14, %rdi
+ addq %rcx, %rdi
+ mulxq -80(%rsp), %rcx, %r15 ## 8-byte Folded Reload
+ adcq %rsi, %rcx
+ movq -64(%rsp), %r13 ## 8-byte Reload
+ mulxq %r13, %rbx, %rsi
+ adcq %r15, %rbx
+ adcq $0, %rsi
+ andl $1, %eax
+ addq %r9, %r14
+ adcq %r10, %rdi
+ adcq %r8, %rcx
+ adcq %rbp, %rbx
+ adcq %r11, %rsi
+ adcq $0, %rax
+ movq -16(%rsp), %rdx ## 8-byte Reload
+ movq 24(%rdx), %rdx
+ mulxq -32(%rsp), %r11, %r8 ## 8-byte Folded Reload
+ mulxq -40(%rsp), %r15, %r9 ## 8-byte Folded Reload
+ mulxq -56(%rsp), %r12, %r14 ## 8-byte Folded Reload
+ mulxq -48(%rsp), %r10, %rbp ## 8-byte Folded Reload
+ addq %r12, %rbp
+ adcq %r15, %r14
+ adcq %r11, %r9
+ adcq $0, %r8
+ addq %rdi, %r10
+ adcq %rcx, %rbp
+ adcq %rbx, %r14
+ adcq %rsi, %r9
+ adcq %rax, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ imulq %r10, %rdx
+ mulxq %r13, %rcx, %rdi
+ movq %rcx, -88(%rsp) ## 8-byte Spill
+ mulxq -80(%rsp), %r15, %rsi ## 8-byte Folded Reload
+ movq -72(%rsp), %rbx ## 8-byte Reload
+ mulxq %rbx, %r12, %rcx
+ movq -24(%rsp), %r11 ## 8-byte Reload
+ mulxq %r11, %rdx, %r13
+ addq %r12, %r13
+ adcq %r15, %rcx
+ adcq -88(%rsp), %rsi ## 8-byte Folded Reload
+ adcq $0, %rdi
+ addq %r10, %rdx
+ adcq %rbp, %r13
+ adcq %r14, %rcx
+ adcq %r9, %rsi
+ adcq %r8, %rdi
+ adcq $0, %rax
+ movq %r13, %rdx
+ subq %r11, %rdx
+ movq %rcx, %rbp
+ sbbq %rbx, %rbp
+ movq %rsi, %r8
+ sbbq -80(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rdi, %rbx
+ sbbq -64(%rsp), %rbx ## 8-byte Folded Reload
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rdi, %rbx
+ testb %al, %al
+ cmovneq %r13, %rdx
+ movq -8(%rsp), %rax ## 8-byte Reload
+ movq %rdx, (%rax)
+ cmovneq %rcx, %rbp
+ movq %rbp, 8(%rax)
+ cmovneq %rsi, %r8
+ movq %r8, 16(%rax)
+ movq %rbx, 24(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montNF4Lbmi2: ## @mcl_fp_montNF4Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq (%rsi), %rdi
+ movq %rdi, -56(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rbp
+ movq %rbp, -64(%rsp) ## 8-byte Spill
+ movq (%rdx), %rax
+ movq %rdx, %r15
+ movq %r15, -24(%rsp) ## 8-byte Spill
+ movq %rbp, %rdx
+ mulxq %rax, %rbp, %r9
+ movq %rdi, %rdx
+ mulxq %rax, %r12, %rbx
+ movq 16(%rsi), %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ addq %rbp, %rbx
+ mulxq %rax, %r14, %rbp
+ adcq %r9, %r14
+ movq 24(%rsi), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ mulxq %rax, %r8, %rdi
+ adcq %rbp, %r8
+ adcq $0, %rdi
+ movq -8(%rcx), %r13
+ movq (%rcx), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %r12, %rdx
+ imulq %r13, %rdx
+ mulxq %rax, %rax, %r11
+ addq %r12, %rax
+ movq 8(%rcx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ mulxq %rax, %rbp, %r10
+ adcq %rbx, %rbp
+ movq 16(%rcx), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulxq %rax, %rsi, %rbx
+ adcq %r14, %rsi
+ movq 24(%rcx), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulxq %rax, %rcx, %rdx
+ adcq %r8, %rcx
+ adcq $0, %rdi
+ addq %r11, %rbp
+ adcq %r10, %rsi
+ adcq %rbx, %rcx
+ adcq %rdx, %rdi
+ movq 8(%r15), %rdx
+ movq -64(%rsp), %r12 ## 8-byte Reload
+ mulxq %r12, %rbx, %r9
+ movq -56(%rsp), %r15 ## 8-byte Reload
+ mulxq %r15, %r10, %r11
+ addq %rbx, %r11
+ mulxq -40(%rsp), %rax, %r8 ## 8-byte Folded Reload
+ adcq %r9, %rax
+ mulxq -80(%rsp), %r9, %rbx ## 8-byte Folded Reload
+ adcq %r8, %r9
+ adcq $0, %rbx
+ addq %rbp, %r10
+ adcq %rsi, %r11
+ adcq %rcx, %rax
+ adcq %rdi, %r9
+ adcq $0, %rbx
+ movq %r10, %rdx
+ imulq %r13, %rdx
+ movq -48(%rsp), %r14 ## 8-byte Reload
+ mulxq %r14, %rcx, %r8
+ addq %r10, %rcx
+ mulxq -16(%rsp), %r10, %rdi ## 8-byte Folded Reload
+ adcq %r11, %r10
+ mulxq -32(%rsp), %rcx, %rsi ## 8-byte Folded Reload
+ adcq %rax, %rcx
+ mulxq -72(%rsp), %rax, %rdx ## 8-byte Folded Reload
+ adcq %r9, %rax
+ adcq $0, %rbx
+ addq %r8, %r10
+ adcq %rdi, %rcx
+ adcq %rsi, %rax
+ adcq %rdx, %rbx
+ movq -24(%rsp), %rdx ## 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq %r12, %rsi, %r8
+ mulxq %r15, %r11, %rbp
+ addq %rsi, %rbp
+ movq -40(%rsp), %r12 ## 8-byte Reload
+ mulxq %r12, %rdi, %r9
+ adcq %r8, %rdi
+ mulxq -80(%rsp), %r8, %rsi ## 8-byte Folded Reload
+ adcq %r9, %r8
+ adcq $0, %rsi
+ addq %r10, %r11
+ adcq %rcx, %rbp
+ adcq %rax, %rdi
+ adcq %rbx, %r8
+ adcq $0, %rsi
+ movq %r11, %rdx
+ imulq %r13, %rdx
+ mulxq %r14, %rax, %r10
+ addq %r11, %rax
+ movq -16(%rsp), %r14 ## 8-byte Reload
+ mulxq %r14, %r9, %rbx
+ adcq %rbp, %r9
+ movq -32(%rsp), %r15 ## 8-byte Reload
+ mulxq %r15, %rax, %rbp
+ adcq %rdi, %rax
+ mulxq -72(%rsp), %rcx, %rdx ## 8-byte Folded Reload
+ adcq %r8, %rcx
+ adcq $0, %rsi
+ addq %r10, %r9
+ adcq %rbx, %rax
+ adcq %rbp, %rcx
+ adcq %rdx, %rsi
+ movq -24(%rsp), %rdx ## 8-byte Reload
+ movq 24(%rdx), %rdx
+ mulxq -64(%rsp), %rbx, %r8 ## 8-byte Folded Reload
+ mulxq -56(%rsp), %r11, %rbp ## 8-byte Folded Reload
+ addq %rbx, %rbp
+ mulxq %r12, %rdi, %r10
+ adcq %r8, %rdi
+ mulxq -80(%rsp), %r8, %rbx ## 8-byte Folded Reload
+ adcq %r10, %r8
+ adcq $0, %rbx
+ addq %r9, %r11
+ adcq %rax, %rbp
+ adcq %rcx, %rdi
+ adcq %rsi, %r8
+ adcq $0, %rbx
+ imulq %r11, %r13
+ movq %r13, %rdx
+ movq -48(%rsp), %r12 ## 8-byte Reload
+ mulxq %r12, %rcx, %r9
+ addq %r11, %rcx
+ mulxq %r14, %r11, %r10
+ adcq %rbp, %r11
+ movq %r15, %rsi
+ mulxq %rsi, %rax, %rcx
+ adcq %rdi, %rax
+ movq -72(%rsp), %rbp ## 8-byte Reload
+ mulxq %rbp, %r15, %rdx
+ adcq %r8, %r15
+ adcq $0, %rbx
+ addq %r9, %r11
+ adcq %r10, %rax
+ adcq %rcx, %r15
+ adcq %rdx, %rbx
+ movq %r11, %rcx
+ subq %r12, %rcx
+ movq %rax, %rdx
+ sbbq %r14, %rdx
+ movq %r15, %rdi
+ sbbq %rsi, %rdi
+ movq %rbx, %rsi
+ sbbq %rbp, %rsi
+ cmovsq %r11, %rcx
+ movq -8(%rsp), %rbp ## 8-byte Reload
+ movq %rcx, (%rbp)
+ cmovsq %rax, %rdx
+ movq %rdx, 8(%rbp)
+ cmovsq %r15, %rdi
+ movq %rdi, 16(%rbp)
+ cmovsq %rbx, %rsi
+ movq %rsi, 24(%rbp)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montRed4Lbmi2: ## @mcl_fp_montRed4Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %r13
+ movq (%rcx), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq (%rsi), %r10
+ movq %r10, %rdx
+ imulq %r13, %rdx
+ movq 24(%rcx), %rdi
+ mulxq %rdi, %r9, %r15
+ movq %rdi, %r14
+ movq %r14, -40(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rdi
+ movq %rdi, -48(%rsp) ## 8-byte Spill
+ mulxq %rdi, %rdi, %rbx
+ movq 8(%rcx), %rcx
+ movq %rcx, -56(%rsp) ## 8-byte Spill
+ mulxq %rcx, %rcx, %r8
+ mulxq %rax, %rdx, %rbp
+ addq %rcx, %rbp
+ adcq %rdi, %r8
+ adcq %r9, %rbx
+ adcq $0, %r15
+ movq 56(%rsi), %r11
+ movq 48(%rsi), %rcx
+ addq %r10, %rdx
+ movq 40(%rsi), %r12
+ adcq 8(%rsi), %rbp
+ adcq 16(%rsi), %r8
+ adcq 24(%rsi), %rbx
+ adcq 32(%rsi), %r15
+ adcq $0, %r12
+ adcq $0, %rcx
+ movq %rcx, -64(%rsp) ## 8-byte Spill
+ adcq $0, %r11
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rbp, %rdx
+ imulq %r13, %rdx
+ mulxq %r14, %rax, %r9
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulxq -48(%rsp), %r14, %rdi ## 8-byte Folded Reload
+ mulxq -56(%rsp), %r10, %rcx ## 8-byte Folded Reload
+ mulxq -32(%rsp), %rdx, %rax ## 8-byte Folded Reload
+ addq %r10, %rax
+ adcq %r14, %rcx
+ adcq -72(%rsp), %rdi ## 8-byte Folded Reload
+ adcq $0, %r9
+ addq %rbp, %rdx
+ adcq %r8, %rax
+ adcq %rbx, %rcx
+ adcq %r15, %rdi
+ adcq %r12, %r9
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r11
+ movq %r11, -72(%rsp) ## 8-byte Spill
+ adcq $0, %rsi
+ movq %rax, %rdx
+ imulq %r13, %rdx
+ movq -40(%rsp), %r15 ## 8-byte Reload
+ mulxq %r15, %rbp, %r8
+ movq %rbp, -16(%rsp) ## 8-byte Spill
+ movq -48(%rsp), %r11 ## 8-byte Reload
+ mulxq %r11, %rbx, %r10
+ movq %rbx, -24(%rsp) ## 8-byte Spill
+ mulxq -56(%rsp), %r12, %rbp ## 8-byte Folded Reload
+ movq -32(%rsp), %r14 ## 8-byte Reload
+ mulxq %r14, %rdx, %rbx
+ addq %r12, %rbx
+ adcq -24(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -16(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %r8
+ addq %rax, %rdx
+ adcq %rcx, %rbx
+ adcq %rdi, %rbp
+ adcq %r9, %r10
+ adcq -64(%rsp), %r8 ## 8-byte Folded Reload
+ adcq $0, -72(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rsi
+ imulq %rbx, %r13
+ movq %r13, %rdx
+ mulxq %r15, %rax, %rdi
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %r13, %rdx
+ mulxq %r11, %r9, %rax
+ movq -56(%rsp), %r11 ## 8-byte Reload
+ mulxq %r11, %r12, %rcx
+ mulxq %r14, %r15, %r13
+ addq %r12, %r13
+ adcq %r9, %rcx
+ adcq -64(%rsp), %rax ## 8-byte Folded Reload
+ adcq $0, %rdi
+ addq %rbx, %r15
+ adcq %rbp, %r13
+ adcq %r10, %rcx
+ adcq %r8, %rax
+ adcq -72(%rsp), %rdi ## 8-byte Folded Reload
+ adcq $0, %rsi
+ movq %r13, %rdx
+ subq %r14, %rdx
+ movq %rcx, %rbp
+ sbbq %r11, %rbp
+ movq %rax, %r8
+ sbbq -48(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rdi, %rbx
+ sbbq -40(%rsp), %rbx ## 8-byte Folded Reload
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rdi, %rbx
+ testb %sil, %sil
+ cmovneq %r13, %rdx
+ movq -8(%rsp), %rsi ## 8-byte Reload
+ movq %rdx, (%rsi)
+ cmovneq %rcx, %rbp
+ movq %rbp, 8(%rsi)
+ cmovneq %rax, %r8
+ movq %r8, 16(%rsi)
+ movq %rbx, 24(%rsi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addPre4Lbmi2: ## @mcl_fp_addPre4Lbmi2
+## BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rdx), %rax
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rax
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ adcq %r8, %r9
+ movq %r9, 24(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_subPre4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subPre4Lbmi2: ## @mcl_fp_subPre4Lbmi2
+## BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r10, 16(%rdi)
+ sbbq %r8, %r9
+ movq %r9, 24(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_shr1_4Lbmi2: ## @mcl_fp_shr1_4Lbmi2
+## BB#0:
+ movq 24(%rsi), %rax
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rdx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rdx
+ movq %rdx, (%rdi)
+ shrdq $1, %rcx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rax, %rcx
+ movq %rcx, 16(%rdi)
+ shrq %rax
+ movq %rax, 24(%rdi)
+ retq
+
+ .globl _mcl_fp_add4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_add4Lbmi2: ## @mcl_fp_add4Lbmi2
+## BB#0:
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %r8
+ movq 16(%rdx), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r9
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r9, 16(%rdi)
+ adcq %r10, %r8
+ movq %r8, 24(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r9
+ sbbq 24(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB59_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r9, 16(%rdi)
+ movq %r8, 24(%rdi)
+LBB59_2: ## %carry
+ retq
+
+ .globl _mcl_fp_addNF4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addNF4Lbmi2: ## @mcl_fp_addNF4Lbmi2
+## BB#0:
+ pushq %rbx
+ movq 24(%rdx), %r8
+ movq 16(%rdx), %r9
+ movq (%rdx), %r11
+ movq 8(%rdx), %r10
+ addq (%rsi), %r11
+ adcq 8(%rsi), %r10
+ adcq 16(%rsi), %r9
+ adcq 24(%rsi), %r8
+ movq %r11, %rsi
+ subq (%rcx), %rsi
+ movq %r10, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r9, %rax
+ sbbq 16(%rcx), %rax
+ movq %r8, %rbx
+ sbbq 24(%rcx), %rbx
+ testq %rbx, %rbx
+ cmovsq %r11, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r10, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r9, %rax
+ movq %rax, 16(%rdi)
+ cmovsq %r8, %rbx
+ movq %rbx, 24(%rdi)
+ popq %rbx
+ retq
+
+ .globl _mcl_fp_sub4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sub4Lbmi2: ## @mcl_fp_sub4Lbmi2
+## BB#0:
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %r8
+ movq 16(%rsi), %r9
+ movq (%rsi), %rax
+ movq 8(%rsi), %r11
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r9
+ movq %rax, (%rdi)
+ movq %r11, 8(%rdi)
+ movq %r9, 16(%rdi)
+ sbbq %r10, %r8
+ movq %r8, 24(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB61_2
+## BB#1: ## %nocarry
+ retq
+LBB61_2: ## %carry
+ movq 24(%rcx), %r10
+ movq 8(%rcx), %rsi
+ movq 16(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r8, %r10
+ movq %r10, 24(%rdi)
+ retq
+
+ .globl _mcl_fp_subNF4Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subNF4Lbmi2: ## @mcl_fp_subNF4Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r8
+ movdqu (%rsi), %xmm2
+ movdqu 16(%rsi), %xmm3
+ pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1]
+ movd %xmm4, %r15
+ movd %xmm1, %r9
+ movd %xmm3, %r11
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %r10
+ pshufd $78, %xmm2, %xmm1 ## xmm1 = xmm2[2,3,0,1]
+ movd %xmm1, %r14
+ movd %xmm0, %rdx
+ movd %xmm2, %r12
+ subq %rdx, %r12
+ sbbq %r10, %r14
+ sbbq %r9, %r11
+ sbbq %r8, %r15
+ movq %r15, %rdx
+ sarq $63, %rdx
+ movq 24(%rcx), %rsi
+ andq %rdx, %rsi
+ movq 16(%rcx), %rax
+ andq %rdx, %rax
+ movq 8(%rcx), %rbx
+ andq %rdx, %rbx
+ andq (%rcx), %rdx
+ addq %r12, %rdx
+ movq %rdx, (%rdi)
+ adcq %r14, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r11, %rax
+ movq %rax, 16(%rdi)
+ adcq %r15, %rsi
+ movq %rsi, 24(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fpDbl_add4Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_add4Lbmi2: ## @mcl_fpDbl_add4Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r9
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r10
+ movq 48(%rsi), %r12
+ movq 40(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq 24(%rdx), %r15
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %rsi
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r15, %rbp
+ movq %rbp, 24(%rdi)
+ adcq %r14, %rsi
+ adcq %r11, %r13
+ adcq %r10, %r12
+ adcq %r9, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rsi, %rdx
+ subq (%rcx), %rdx
+ movq %r13, %rbp
+ sbbq 8(%rcx), %rbp
+ movq %r12, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r8, %r9
+ sbbq 24(%rcx), %r9
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rsi, %rdx
+ movq %rdx, 32(%rdi)
+ testb %al, %al
+ cmovneq %r13, %rbp
+ movq %rbp, 40(%rdi)
+ cmovneq %r12, %rbx
+ movq %rbx, 48(%rdi)
+ cmovneq %r8, %r9
+ movq %r9, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sub4Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sub4Lbmi2: ## @mcl_fpDbl_sub4Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r9
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq (%rsi), %rbx
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ movq %rbx, (%rdi)
+ movq 8(%rsi), %rbx
+ sbbq 8(%rdx), %rbx
+ movq %rbx, 8(%rdi)
+ movq 16(%rsi), %rbx
+ sbbq 16(%rdx), %rbx
+ movq %rbx, 16(%rdi)
+ movq 24(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 40(%rdx), %r11
+ movq 32(%rdx), %rdx
+ movq %rbx, 24(%rdi)
+ movq 32(%rsi), %r12
+ sbbq %rdx, %r12
+ movq 48(%rsi), %r14
+ movq 40(%rsi), %r15
+ sbbq %r11, %r15
+ sbbq %r10, %r14
+ sbbq %r9, %r8
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ movq 16(%rcx), %rdx
+ cmoveq %rax, %rdx
+ movq 24(%rcx), %rbx
+ cmoveq %rax, %rbx
+ cmovneq 8(%rcx), %rax
+ addq %r12, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r15, %rax
+ movq %rax, 40(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 48(%rdi)
+ adcq %r8, %rbx
+ movq %rbx, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_mulUnitPre5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre5Lbmi2: ## @mcl_fp_mulUnitPre5Lbmi2
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ mulxq 32(%rsi), %r8, %r11
+ mulxq 24(%rsi), %r9, %rax
+ mulxq 16(%rsi), %r10, %rcx
+ mulxq 8(%rsi), %r14, %rbx
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %r14, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r9, %rcx
+ movq %rcx, 24(%rdi)
+ adcq %r8, %rax
+ movq %rax, 32(%rdi)
+ adcq $0, %r11
+ movq %r11, 40(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fpDbl_mulPre5Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre5Lbmi2: ## @mcl_fpDbl_mulPre5Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movq %rdi, -40(%rsp) ## 8-byte Spill
+ movq (%rsi), %r11
+ movq 8(%rsi), %r10
+ movq (%rdx), %rcx
+ movq %r10, %rdx
+ mulxq %rcx, %rax, %r14
+ movq %r11, %rdx
+ mulxq %rcx, %rdx, %rbx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq 24(%rsi), %rbp
+ movq %rbp, -48(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r15
+ addq %rax, %rbx
+ movq %r15, %rdx
+ mulxq %rcx, %rax, %r13
+ adcq %r14, %rax
+ movq %rbp, %rdx
+ mulxq %rcx, %r8, %r12
+ adcq %r13, %r8
+ movq 32(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rcx, %r9, %r13
+ adcq %r12, %r9
+ movq -56(%rsp), %rcx ## 8-byte Reload
+ movq %rcx, (%rdi)
+ adcq $0, %r13
+ movq -24(%rsp), %rdi ## 8-byte Reload
+ movq 8(%rdi), %rbp
+ movq %r11, %rdx
+ mulxq %rbp, %r12, %r11
+ addq %rbx, %r12
+ movq %r10, %rdx
+ mulxq %rbp, %rbx, %rcx
+ movq %rcx, -56(%rsp) ## 8-byte Spill
+ adcq %rax, %rbx
+ movq %r15, %rdx
+ mulxq %rbp, %rcx, %r10
+ adcq %r8, %rcx
+ movq -48(%rsp), %rdx ## 8-byte Reload
+ mulxq %rbp, %rax, %r8
+ adcq %r9, %rax
+ movq %r14, %rdx
+ mulxq %rbp, %r15, %rdx
+ adcq %r13, %r15
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq %r11, %rbx
+ movq -40(%rsp), %rbp ## 8-byte Reload
+ movq %r12, 8(%rbp)
+ adcq -56(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r10, %rax
+ adcq %r8, %r15
+ adcq %rdx, %r14
+ movq (%rsi), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %r8
+ movq %r8, -48(%rsp) ## 8-byte Spill
+ movq 16(%rdi), %rbp
+ mulxq %rbp, %r12, %rdx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ addq %rbx, %r12
+ movq %r8, %rdx
+ mulxq %rbp, %rbx, %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ adcq %rcx, %rbx
+ movq 16(%rsi), %r11
+ movq %r11, %rdx
+ mulxq %rbp, %rcx, %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ adcq %rax, %rcx
+ movq 24(%rsi), %r13
+ movq %r13, %rdx
+ mulxq %rbp, %r9, %r10
+ adcq %r15, %r9
+ movq 32(%rsi), %r15
+ movq %r15, %rdx
+ mulxq %rbp, %r8, %rdx
+ adcq %r14, %r8
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq -8(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -16(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -32(%rsp), %r9 ## 8-byte Folded Reload
+ adcq %r10, %r8
+ adcq %rdx, %r14
+ movq -40(%rsp), %r10 ## 8-byte Reload
+ movq %r12, 16(%r10)
+ movq %rdi, %rbp
+ movq 24(%rbp), %rax
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r12, %rdi
+ addq %rbx, %r12
+ movq -48(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %rbx, %rdx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ adcq %rcx, %rbx
+ movq %r11, %rdx
+ mulxq %rax, %rcx, %r11
+ adcq %r9, %rcx
+ movq %r13, %rdx
+ mulxq %rax, %r13, %r9
+ adcq %r8, %r13
+ movq %r15, %rdx
+ mulxq %rax, %r8, %rdx
+ adcq %r14, %r8
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq %rdi, %rbx
+ movq %r12, 24(%r10)
+ movq %r10, %rdi
+ adcq -48(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r11, %r13
+ adcq %r9, %r8
+ adcq %rdx, %r14
+ movq 32(%rbp), %rdx
+ mulxq 8(%rsi), %rax, %r9
+ mulxq (%rsi), %rbp, %r10
+ addq %rbx, %rbp
+ adcq %rcx, %rax
+ mulxq 16(%rsi), %rbx, %r11
+ adcq %r13, %rbx
+ movq %rbp, 32(%rdi)
+ mulxq 32(%rsi), %rcx, %r15
+ mulxq 24(%rsi), %rsi, %rdx
+ adcq %r8, %rsi
+ adcq %r14, %rcx
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq %r10, %rax
+ movq %rax, 40(%rdi)
+ adcq %r9, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 56(%rdi)
+ adcq %rdx, %rcx
+ movq %rcx, 64(%rdi)
+ adcq %r15, %rbp
+ movq %rbp, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sqrPre5Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre5Lbmi2: ## @mcl_fpDbl_sqrPre5Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 16(%rsi), %r11
+ movq (%rsi), %rax
+ movq 8(%rsi), %rcx
+ movq %r11, %rdx
+ mulxq %rax, %rbx, %r15
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r13
+ movq %rcx, %rdx
+ mulxq %rax, %r12, %rbp
+ movq %rbp, -16(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ mulxq %rax, %rdx, %r14
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ addq %r12, %r14
+ adcq %rbp, %rbx
+ movq %r13, %rdx
+ mulxq %rax, %r8, %r10
+ adcq %r15, %r8
+ movq %r9, %rdx
+ movq %r9, -8(%rsp) ## 8-byte Spill
+ mulxq %rax, %rbp, %r15
+ adcq %r10, %rbp
+ movq -24(%rsp), %rax ## 8-byte Reload
+ movq %rax, (%rdi)
+ adcq $0, %r15
+ addq %r12, %r14
+ movq %rcx, %rdx
+ mulxq %rcx, %rax, %rdx
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ adcq %rbx, %rax
+ movq %r11, %rdx
+ mulxq %rcx, %rbx, %r10
+ adcq %r8, %rbx
+ movq %r13, %rdx
+ mulxq %rcx, %r13, %r8
+ adcq %rbp, %r13
+ movq %r9, %rdx
+ mulxq %rcx, %r12, %rcx
+ adcq %r15, %r12
+ sbbq %r15, %r15
+ andl $1, %r15d
+ addq -16(%rsp), %rax ## 8-byte Folded Reload
+ movq %r14, 8(%rdi)
+ adcq -24(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %r10, %r13
+ adcq %r8, %r12
+ adcq %rcx, %r15
+ movq (%rsi), %r9
+ movq 8(%rsi), %r10
+ movq %r9, %rdx
+ mulxq %r11, %rbp, %rcx
+ movq %rcx, -16(%rsp) ## 8-byte Spill
+ addq %rax, %rbp
+ movq %r10, %rdx
+ mulxq %r11, %rax, %r8
+ adcq %rbx, %rax
+ movq %r11, %rdx
+ mulxq %r11, %r14, %rcx
+ movq %rcx, -24(%rsp) ## 8-byte Spill
+ adcq %r13, %r14
+ movq 24(%rsi), %rcx
+ movq %rcx, %rdx
+ mulxq %r11, %rbx, %r13
+ adcq %r12, %rbx
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ mulxq %r11, %r12, %rdx
+ adcq %r15, %r12
+ sbbq %r15, %r15
+ andl $1, %r15d
+ addq -16(%rsp), %rax ## 8-byte Folded Reload
+ adcq %r8, %r14
+ movq %rbp, 16(%rdi)
+ adcq -24(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %r13, %r12
+ adcq %rdx, %r15
+ movq %r10, %rdx
+ mulxq %rcx, %r10, %rdx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ movq %r9, %rdx
+ mulxq %rcx, %r13, %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ addq %rax, %r13
+ movq 16(%rsi), %r8
+ movq 32(%rsi), %rax
+ adcq %r14, %r10
+ movq %r8, %rdx
+ mulxq %rcx, %r9, %r14
+ adcq %rbx, %r9
+ movq %rcx, %rdx
+ mulxq %rcx, %r11, %rbp
+ adcq %r12, %r11
+ movq %rax, %rdx
+ mulxq %rcx, %r12, %rdx
+ adcq %r15, %r12
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq -16(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r13, 24(%rdi)
+ adcq -8(%rsp), %r9 ## 8-byte Folded Reload
+ adcq %r14, %r11
+ adcq %rbp, %r12
+ adcq %rdx, %rbx
+ movq %rax, %rdx
+ mulxq 24(%rsi), %rbp, %r14
+ mulxq (%rsi), %rdx, %r15
+ addq %r10, %rdx
+ movq %rdx, 32(%rdi)
+ movq %rax, %rdx
+ mulxq 8(%rsi), %rsi, %r10
+ adcq %r9, %rsi
+ movq %r8, %rdx
+ mulxq %rax, %rcx, %r8
+ adcq %r11, %rcx
+ adcq %r12, %rbp
+ movq %rax, %rdx
+ mulxq %rax, %rdx, %rax
+ adcq %rbx, %rdx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq %r15, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r10, %rcx
+ movq %rcx, 48(%rdi)
+ adcq %r8, %rbp
+ movq %rbp, 56(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 64(%rdi)
+ adcq %rax, %rbx
+ movq %rbx, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mont5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mont5Lbmi2: ## @mcl_fp_mont5Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %rdi
+ movq %rdi, -104(%rsp) ## 8-byte Spill
+ movq (%rdx), %rax
+ movq %rdi, %rdx
+ mulxq %rax, %r10, %rbx
+ movq 24(%rsi), %rdx
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ mulxq %rax, %r12, %r14
+ movq 16(%rsi), %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ mulxq %rax, %r13, %r11
+ movq (%rsi), %rbp
+ movq %rbp, -40(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rdx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ mulxq %rax, %rdi, %r9
+ movq %rbp, %rdx
+ mulxq %rax, %r15, %r8
+ addq %rdi, %r8
+ adcq %r13, %r9
+ adcq %r12, %r11
+ adcq %r10, %r14
+ adcq $0, %rbx
+ movq %rbx, -112(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq %r15, %rdx
+ imulq %rax, %rdx
+ movq 32(%rcx), %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulxq %rax, %rax, %r12
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulxq %rax, %r13, %r10
+ movq 8(%rcx), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulxq %rax, %rdi, %rbp
+ movq (%rcx), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ mulxq %rax, %rsi, %rbx
+ addq %rdi, %rbx
+ movq 16(%rcx), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ mulxq %rax, %rdi, %rcx
+ adcq %rbp, %rdi
+ adcq %r13, %rcx
+ adcq -120(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %r12
+ addq %r15, %rsi
+ adcq %r8, %rbx
+ adcq %r9, %rdi
+ adcq %r11, %rcx
+ adcq %r14, %r10
+ adcq -112(%rsp), %r12 ## 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ mulxq -104(%rsp), %rax, %r14 ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulxq -24(%rsp), %rax, %r15 ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %r13, %r9 ## 8-byte Folded Reload
+ mulxq -48(%rsp), %r8, %rsi ## 8-byte Folded Reload
+ mulxq -40(%rsp), %r11, %rax ## 8-byte Folded Reload
+ addq %r8, %rax
+ adcq %r13, %rsi
+ adcq -120(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r15 ## 8-byte Folded Reload
+ adcq $0, %r14
+ addq %rbx, %r11
+ adcq %rdi, %rax
+ adcq %rcx, %rsi
+ adcq %r10, %r9
+ adcq %r12, %r15
+ adcq %rbp, %r14
+ sbbq %r12, %r12
+ andl $1, %r12d
+ movq %r11, %rdx
+ imulq -16(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -56(%rsp), %rcx, %r10 ## 8-byte Folded Reload
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ mulxq -64(%rsp), %rcx, %rdi ## 8-byte Folded Reload
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ mulxq -88(%rsp), %r13, %rcx ## 8-byte Folded Reload
+ mulxq -72(%rsp), %r8, %rbx ## 8-byte Folded Reload
+ mulxq -80(%rsp), %rdx, %rbp ## 8-byte Folded Reload
+ addq %r8, %rbp
+ adcq %r13, %rbx
+ adcq -120(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -112(%rsp), %rdi ## 8-byte Folded Reload
+ adcq $0, %r10
+ addq %r11, %rdx
+ adcq %rax, %rbp
+ adcq %rsi, %rbx
+ adcq %r9, %rcx
+ adcq %r15, %rdi
+ adcq %r14, %r10
+ adcq $0, %r12
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ mulxq -104(%rsp), %rax, %r15 ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulxq -24(%rsp), %rax, %r11 ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %r13, %r9 ## 8-byte Folded Reload
+ mulxq -48(%rsp), %rsi, %r8 ## 8-byte Folded Reload
+ mulxq -40(%rsp), %r14, %rax ## 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %r13, %r8
+ adcq -120(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %r15
+ addq %rbp, %r14
+ adcq %rbx, %rax
+ adcq %rcx, %r8
+ adcq %rdi, %r9
+ adcq %r10, %r11
+ adcq %r12, %r15
+ sbbq %r13, %r13
+ andl $1, %r13d
+ movq %r14, %rdx
+ imulq -16(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -56(%rsp), %rcx, %r12 ## 8-byte Folded Reload
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ mulxq -64(%rsp), %rcx, %r10 ## 8-byte Folded Reload
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ mulxq -88(%rsp), %rdi, %rsi ## 8-byte Folded Reload
+ mulxq -72(%rsp), %rcx, %rbx ## 8-byte Folded Reload
+ mulxq -80(%rsp), %rdx, %rbp ## 8-byte Folded Reload
+ addq %rcx, %rbp
+ adcq %rdi, %rbx
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -112(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %r12
+ addq %r14, %rdx
+ adcq %rax, %rbp
+ adcq %r8, %rbx
+ adcq %r9, %rsi
+ adcq %r11, %r10
+ adcq %r15, %r12
+ adcq $0, %r13
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ mulxq -104(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulxq -24(%rsp), %r11, %r14 ## 8-byte Folded Reload
+ mulxq -32(%rsp), %r8, %r9 ## 8-byte Folded Reload
+ mulxq -48(%rsp), %rax, %rdi ## 8-byte Folded Reload
+ mulxq -40(%rsp), %r15, %rcx ## 8-byte Folded Reload
+ addq %rax, %rcx
+ adcq %r8, %rdi
+ adcq %r11, %r9
+ adcq -120(%rsp), %r14 ## 8-byte Folded Reload
+ movq -112(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rbp, %r15
+ adcq %rbx, %rcx
+ adcq %rsi, %rdi
+ adcq %r10, %r9
+ adcq %r12, %r14
+ adcq %r13, %rax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ sbbq %r12, %r12
+ andl $1, %r12d
+ movq %r15, %rdx
+ imulq -16(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -56(%rsp), %rax, %rbp ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ mulxq -64(%rsp), %r13, %r10 ## 8-byte Folded Reload
+ mulxq -88(%rsp), %rbx, %r8 ## 8-byte Folded Reload
+ mulxq -72(%rsp), %rsi, %r11 ## 8-byte Folded Reload
+ mulxq -80(%rsp), %rdx, %rax ## 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %rbx, %r11
+ adcq %r13, %r8
+ adcq -120(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %rbp
+ addq %r15, %rdx
+ adcq %rcx, %rax
+ adcq %rdi, %r11
+ adcq %r9, %r8
+ adcq %r14, %r10
+ adcq -112(%rsp), %rbp ## 8-byte Folded Reload
+ adcq $0, %r12
+ movq -96(%rsp), %rcx ## 8-byte Reload
+ movq 32(%rcx), %rdx
+ mulxq -104(%rsp), %rcx, %r14 ## 8-byte Folded Reload
+ movq %rcx, -96(%rsp) ## 8-byte Spill
+ mulxq -24(%rsp), %rcx, %rbx ## 8-byte Folded Reload
+ movq %rcx, -104(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %rsi, %r15 ## 8-byte Folded Reload
+ mulxq -48(%rsp), %rcx, %r9 ## 8-byte Folded Reload
+ mulxq -40(%rsp), %r13, %rdi ## 8-byte Folded Reload
+ addq %rcx, %rdi
+ adcq %rsi, %r9
+ adcq -104(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rbx ## 8-byte Folded Reload
+ adcq $0, %r14
+ addq %rax, %r13
+ adcq %r11, %rdi
+ adcq %r8, %r9
+ adcq %r10, %r15
+ adcq %rbp, %rbx
+ adcq %r12, %r14
+ sbbq %rax, %rax
+ movq -16(%rsp), %rdx ## 8-byte Reload
+ imulq %r13, %rdx
+ mulxq -80(%rsp), %r10, %rcx ## 8-byte Folded Reload
+ mulxq -72(%rsp), %r8, %rsi ## 8-byte Folded Reload
+ addq %rcx, %r8
+ mulxq -88(%rsp), %rbp, %r11 ## 8-byte Folded Reload
+ adcq %rsi, %rbp
+ mulxq -64(%rsp), %rcx, %r12 ## 8-byte Folded Reload
+ adcq %r11, %rcx
+ mulxq -56(%rsp), %rsi, %r11 ## 8-byte Folded Reload
+ adcq %r12, %rsi
+ adcq $0, %r11
+ andl $1, %eax
+ addq %r13, %r10
+ adcq %rdi, %r8
+ adcq %r9, %rbp
+ adcq %r15, %rcx
+ adcq %rbx, %rsi
+ adcq %r14, %r11
+ adcq $0, %rax
+ movq %r8, %rdi
+ subq -80(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rbp, %rbx
+ sbbq -72(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rcx, %r9
+ sbbq -88(%rsp), %r9 ## 8-byte Folded Reload
+ movq %rsi, %rdx
+ sbbq -64(%rsp), %rdx ## 8-byte Folded Reload
+ movq %r11, %r10
+ sbbq -56(%rsp), %r10 ## 8-byte Folded Reload
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rsi, %rdx
+ testb %al, %al
+ cmovneq %r8, %rdi
+ movq -8(%rsp), %rax ## 8-byte Reload
+ movq %rdi, (%rax)
+ cmovneq %rbp, %rbx
+ movq %rbx, 8(%rax)
+ cmovneq %rcx, %r9
+ movq %r9, 16(%rax)
+ movq %rdx, 24(%rax)
+ cmovneq %r11, %r10
+ movq %r10, 32(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montNF5Lbmi2: ## @mcl_fp_montNF5Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq (%rsi), %r13
+ movq 8(%rsi), %rbp
+ movq %rbp, -104(%rsp) ## 8-byte Spill
+ movq (%rdx), %rax
+ movq %rbp, %rdx
+ mulxq %rax, %rbp, %r9
+ movq %r13, %rdx
+ movq %r13, -24(%rsp) ## 8-byte Spill
+ mulxq %rax, %r8, %r10
+ movq 16(%rsi), %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ addq %rbp, %r10
+ mulxq %rax, %rbp, %rbx
+ adcq %r9, %rbp
+ movq 24(%rsi), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ mulxq %rax, %r15, %r9
+ adcq %rbx, %r15
+ movq 32(%rsi), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ mulxq %rax, %rax, %r11
+ adcq %r9, %rax
+ adcq $0, %r11
+ movq -8(%rcx), %rsi
+ movq %rsi, -32(%rsp) ## 8-byte Spill
+ movq %r8, %rdx
+ imulq %rsi, %rdx
+ movq (%rcx), %rsi
+ movq %rsi, -48(%rsp) ## 8-byte Spill
+ mulxq %rsi, %rbx, %r14
+ addq %r8, %rbx
+ movq 8(%rcx), %rsi
+ movq %rsi, -40(%rsp) ## 8-byte Spill
+ mulxq %rsi, %rbx, %r12
+ adcq %r10, %rbx
+ movq 16(%rcx), %rsi
+ movq %rsi, -16(%rsp) ## 8-byte Spill
+ mulxq %rsi, %r10, %rdi
+ adcq %rbp, %r10
+ movq 24(%rcx), %rsi
+ movq %rsi, -88(%rsp) ## 8-byte Spill
+ mulxq %rsi, %r9, %rbp
+ adcq %r15, %r9
+ movq 32(%rcx), %rcx
+ movq %rcx, -56(%rsp) ## 8-byte Spill
+ mulxq %rcx, %r8, %rcx
+ adcq %rax, %r8
+ adcq $0, %r11
+ addq %r14, %rbx
+ adcq %r12, %r10
+ adcq %rdi, %r9
+ adcq %rbp, %r8
+ adcq %rcx, %r11
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ mulxq -104(%rsp), %rcx, %rsi ## 8-byte Folded Reload
+ mulxq %r13, %r14, %rax
+ addq %rcx, %rax
+ mulxq -64(%rsp), %rcx, %rdi ## 8-byte Folded Reload
+ adcq %rsi, %rcx
+ mulxq -72(%rsp), %rsi, %r15 ## 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -80(%rsp), %rdi, %rbp ## 8-byte Folded Reload
+ adcq %r15, %rdi
+ adcq $0, %rbp
+ addq %rbx, %r14
+ adcq %r10, %rax
+ adcq %r9, %rcx
+ adcq %r8, %rsi
+ adcq %r11, %rdi
+ adcq $0, %rbp
+ movq %r14, %rdx
+ movq -32(%rsp), %r12 ## 8-byte Reload
+ imulq %r12, %rdx
+ mulxq -48(%rsp), %rbx, %r15 ## 8-byte Folded Reload
+ addq %r14, %rbx
+ movq -40(%rsp), %r13 ## 8-byte Reload
+ mulxq %r13, %r8, %rbx
+ adcq %rax, %r8
+ mulxq -16(%rsp), %r9, %rax ## 8-byte Folded Reload
+ adcq %rcx, %r9
+ mulxq -88(%rsp), %r10, %rcx ## 8-byte Folded Reload
+ adcq %rsi, %r10
+ mulxq -56(%rsp), %r11, %rdx ## 8-byte Folded Reload
+ adcq %rdi, %r11
+ adcq $0, %rbp
+ addq %r15, %r8
+ adcq %rbx, %r9
+ adcq %rax, %r10
+ adcq %rcx, %r11
+ adcq %rdx, %rbp
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ mulxq -104(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq -24(%rsp), %r14, %rsi ## 8-byte Folded Reload
+ addq %rcx, %rsi
+ mulxq -64(%rsp), %rbx, %rcx ## 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -72(%rsp), %rdi, %r15 ## 8-byte Folded Reload
+ adcq %rcx, %rdi
+ mulxq -80(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ adcq %r15, %rcx
+ adcq $0, %rax
+ addq %r8, %r14
+ adcq %r9, %rsi
+ adcq %r10, %rbx
+ adcq %r11, %rdi
+ adcq %rbp, %rcx
+ adcq $0, %rax
+ movq %r14, %rdx
+ imulq %r12, %rdx
+ movq -48(%rsp), %r12 ## 8-byte Reload
+ mulxq %r12, %rbp, %r15
+ addq %r14, %rbp
+ mulxq %r13, %r8, %rbp
+ adcq %rsi, %r8
+ movq -16(%rsp), %r13 ## 8-byte Reload
+ mulxq %r13, %r9, %rsi
+ adcq %rbx, %r9
+ mulxq -88(%rsp), %r10, %rbx ## 8-byte Folded Reload
+ adcq %rdi, %r10
+ mulxq -56(%rsp), %r11, %rdx ## 8-byte Folded Reload
+ adcq %rcx, %r11
+ adcq $0, %rax
+ addq %r15, %r8
+ adcq %rbp, %r9
+ adcq %rsi, %r10
+ adcq %rbx, %r11
+ adcq %rdx, %rax
+ movq -96(%rsp), %rcx ## 8-byte Reload
+ movq 24(%rcx), %rdx
+ mulxq -104(%rsp), %rdi, %rsi ## 8-byte Folded Reload
+ mulxq -24(%rsp), %r14, %rcx ## 8-byte Folded Reload
+ addq %rdi, %rcx
+ mulxq -64(%rsp), %rbx, %rdi ## 8-byte Folded Reload
+ adcq %rsi, %rbx
+ mulxq -72(%rsp), %rsi, %r15 ## 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -80(%rsp), %rdi, %rbp ## 8-byte Folded Reload
+ adcq %r15, %rdi
+ adcq $0, %rbp
+ addq %r8, %r14
+ adcq %r9, %rcx
+ adcq %r10, %rbx
+ adcq %r11, %rsi
+ adcq %rax, %rdi
+ adcq $0, %rbp
+ movq %r14, %rdx
+ imulq -32(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq %r12, %rax, %r11
+ addq %r14, %rax
+ mulxq -40(%rsp), %r8, %r14 ## 8-byte Folded Reload
+ adcq %rcx, %r8
+ mulxq %r13, %r9, %rax
+ adcq %rbx, %r9
+ movq -88(%rsp), %r12 ## 8-byte Reload
+ mulxq %r12, %r10, %rbx
+ adcq %rsi, %r10
+ mulxq -56(%rsp), %rcx, %rdx ## 8-byte Folded Reload
+ adcq %rdi, %rcx
+ adcq $0, %rbp
+ addq %r11, %r8
+ adcq %r14, %r9
+ adcq %rax, %r10
+ adcq %rbx, %rcx
+ adcq %rdx, %rbp
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ mulxq -104(%rsp), %rdi, %rbx ## 8-byte Folded Reload
+ mulxq -24(%rsp), %r14, %rsi ## 8-byte Folded Reload
+ addq %rdi, %rsi
+ mulxq -64(%rsp), %rdi, %rax ## 8-byte Folded Reload
+ adcq %rbx, %rdi
+ mulxq -72(%rsp), %rbx, %r15 ## 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -80(%rsp), %r11, %rax ## 8-byte Folded Reload
+ adcq %r15, %r11
+ adcq $0, %rax
+ addq %r8, %r14
+ adcq %r9, %rsi
+ adcq %r10, %rdi
+ adcq %rcx, %rbx
+ adcq %rbp, %r11
+ adcq $0, %rax
+ movq -32(%rsp), %rdx ## 8-byte Reload
+ imulq %r14, %rdx
+ movq -48(%rsp), %r10 ## 8-byte Reload
+ mulxq %r10, %rcx, %rbp
+ movq %rbp, -96(%rsp) ## 8-byte Spill
+ addq %r14, %rcx
+ movq -40(%rsp), %r9 ## 8-byte Reload
+ mulxq %r9, %r14, %rcx
+ movq %rcx, -104(%rsp) ## 8-byte Spill
+ adcq %rsi, %r14
+ movq %r13, %r8
+ mulxq %r8, %r15, %r13
+ adcq %rdi, %r15
+ mulxq %r12, %rbp, %rcx
+ adcq %rbx, %rbp
+ movq -56(%rsp), %rbx ## 8-byte Reload
+ mulxq %rbx, %r12, %rdx
+ adcq %r11, %r12
+ adcq $0, %rax
+ addq -96(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r15 ## 8-byte Folded Reload
+ adcq %r13, %rbp
+ adcq %rcx, %r12
+ adcq %rdx, %rax
+ movq %r14, %rcx
+ subq %r10, %rcx
+ movq %r15, %rsi
+ sbbq %r9, %rsi
+ movq %rbp, %rdi
+ sbbq %r8, %rdi
+ movq %r12, %r8
+ sbbq -88(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rax, %rdx
+ sbbq %rbx, %rdx
+ movq %rdx, %rbx
+ sarq $63, %rbx
+ cmovsq %r14, %rcx
+ movq -8(%rsp), %rbx ## 8-byte Reload
+ movq %rcx, (%rbx)
+ cmovsq %r15, %rsi
+ movq %rsi, 8(%rbx)
+ cmovsq %rbp, %rdi
+ movq %rdi, 16(%rbx)
+ cmovsq %r12, %r8
+ movq %r8, 24(%rbx)
+ cmovsq %rax, %rdx
+ movq %rdx, 32(%rbx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montRed5Lbmi2: ## @mcl_fp_montRed5Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq (%rsi), %r15
+ movq %r15, %rdx
+ imulq %rax, %rdx
+ movq 32(%rcx), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulxq %rax, %r8, %r14
+ movq 24(%rcx), %r12
+ mulxq %r12, %r10, %r13
+ movq %r12, -56(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %r9
+ mulxq %r9, %rdi, %rbp
+ movq %r9, -64(%rsp) ## 8-byte Spill
+ movq (%rcx), %rbx
+ movq %rbx, -40(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ mulxq %rax, %rax, %r11
+ mulxq %rbx, %rdx, %rcx
+ addq %rax, %rcx
+ adcq %rdi, %r11
+ adcq %r10, %rbp
+ adcq %r8, %r13
+ adcq $0, %r14
+ addq %r15, %rdx
+ movq 72(%rsi), %rax
+ movq 64(%rsi), %rdx
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r11
+ adcq 24(%rsi), %rbp
+ adcq 32(%rsi), %r13
+ adcq 40(%rsi), %r14
+ movq %r14, -112(%rsp) ## 8-byte Spill
+ movq 56(%rsi), %rdi
+ movq 48(%rsi), %rsi
+ adcq $0, %rsi
+ movq %rsi, -32(%rsp) ## 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -88(%rsp) ## 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rcx, %rdx
+ movq -104(%rsp), %r14 ## 8-byte Reload
+ imulq %r14, %rdx
+ mulxq -72(%rsp), %rax, %r15 ## 8-byte Folded Reload
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ mulxq %r12, %rax, %r10
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ mulxq %r9, %rbx, %r8
+ movq -80(%rsp), %r12 ## 8-byte Reload
+ mulxq %r12, %r9, %rdi
+ mulxq -40(%rsp), %rdx, %rax ## 8-byte Folded Reload
+ addq %r9, %rax
+ adcq %rbx, %rdi
+ adcq -24(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -16(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %r15
+ addq %rcx, %rdx
+ adcq %r11, %rax
+ adcq %rbp, %rdi
+ adcq %r13, %r8
+ adcq -112(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -32(%rsp), %r15 ## 8-byte Folded Reload
+ adcq $0, -88(%rsp) ## 8-byte Folded Spill
+ adcq $0, -96(%rsp) ## 8-byte Folded Spill
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rsi
+ movq %rax, %rdx
+ imulq %r14, %rdx
+ mulxq -72(%rsp), %rcx, %r13 ## 8-byte Folded Reload
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ mulxq -56(%rsp), %rcx, %r14 ## 8-byte Folded Reload
+ movq %rcx, -32(%rsp) ## 8-byte Spill
+ mulxq -64(%rsp), %r11, %rbx ## 8-byte Folded Reload
+ mulxq %r12, %r9, %rbp
+ mulxq -40(%rsp), %rdx, %rcx ## 8-byte Folded Reload
+ addq %r9, %rcx
+ adcq %r11, %rbp
+ adcq -32(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rax, %rdx
+ adcq %rdi, %rcx
+ adcq %r8, %rbp
+ adcq %r10, %rbx
+ adcq %r15, %r14
+ adcq -88(%rsp), %r13 ## 8-byte Folded Reload
+ adcq $0, -96(%rsp) ## 8-byte Folded Spill
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rsi
+ movq %rcx, %rdx
+ imulq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq -72(%rsp), %r9 ## 8-byte Reload
+ mulxq %r9, %rax, %r12
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ mulxq -56(%rsp), %rax, %r10 ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulxq -64(%rsp), %r8, %r11 ## 8-byte Folded Reload
+ mulxq -80(%rsp), %rdi, %r15 ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rdx, %rax ## 8-byte Folded Reload
+ addq %rdi, %rax
+ adcq %r8, %r15
+ adcq -112(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %r12
+ addq %rcx, %rdx
+ adcq %rbp, %rax
+ adcq %rbx, %r15
+ adcq %r14, %r11
+ adcq %r13, %r10
+ adcq -96(%rsp), %r12 ## 8-byte Folded Reload
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rsi
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ mulxq %r9, %rdi, %rcx
+ movq %rdi, -96(%rsp) ## 8-byte Spill
+ mulxq -56(%rsp), %rbp, %rdi ## 8-byte Folded Reload
+ movq %rbp, -104(%rsp) ## 8-byte Spill
+ mulxq -64(%rsp), %r13, %rbp ## 8-byte Folded Reload
+ movq -40(%rsp), %r14 ## 8-byte Reload
+ mulxq %r14, %r8, %r9
+ mulxq -80(%rsp), %rbx, %rdx ## 8-byte Folded Reload
+ addq %r9, %rbx
+ adcq %r13, %rdx
+ adcq -104(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -96(%rsp), %rdi ## 8-byte Folded Reload
+ adcq $0, %rcx
+ addq %rax, %r8
+ adcq %r15, %rbx
+ adcq %r11, %rdx
+ adcq %r10, %rbp
+ adcq %r12, %rdi
+ adcq -48(%rsp), %rcx ## 8-byte Folded Reload
+ adcq $0, %rsi
+ movq %rbx, %rax
+ subq %r14, %rax
+ movq %rdx, %r8
+ sbbq -80(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rbp, %r9
+ sbbq -64(%rsp), %r9 ## 8-byte Folded Reload
+ movq %rdi, %r10
+ sbbq -56(%rsp), %r10 ## 8-byte Folded Reload
+ movq %rcx, %r11
+ sbbq -72(%rsp), %r11 ## 8-byte Folded Reload
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rcx, %r11
+ testb %sil, %sil
+ cmovneq %rbx, %rax
+ movq -8(%rsp), %rcx ## 8-byte Reload
+ movq %rax, (%rcx)
+ cmovneq %rdx, %r8
+ movq %r8, 8(%rcx)
+ cmovneq %rbp, %r9
+ movq %r9, 16(%rcx)
+ cmovneq %rdi, %r10
+ movq %r10, 24(%rcx)
+ movq %r11, 32(%rcx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
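+## mcl_fp_addPre5Lbmi2: 5-limb addition without reduction; the carry out is returned in %rax.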
+ .globl _mcl_fp_addPre5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addPre5Lbmi2: ## @mcl_fp_addPre5Lbmi2
+## BB#0:
+ movq 32(%rdx), %r8
+ movq 24(%rdx), %r9
+ movq 24(%rsi), %r11
+ movq 32(%rsi), %r10
+ movq 16(%rdx), %rcx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rcx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ adcq %r9, %r11
+ movq %r11, 24(%rdi)
+ adcq %r8, %r10
+ movq %r10, 32(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
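+## mcl_fp_subPre5Lbmi2: 5-limb subtraction without reduction; the borrow out is returned in %rax.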
+ .globl _mcl_fp_subPre5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subPre5Lbmi2: ## @mcl_fp_subPre5Lbmi2
+## BB#0:
+ pushq %rbx
+ movq 32(%rsi), %r10
+ movq 24(%rdx), %r8
+ movq 32(%rdx), %r9
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %rcx
+ movq %rbx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r8, %r11
+ movq %r11, 24(%rdi)
+ sbbq %r9, %r10
+ movq %r10, 32(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ retq
+
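+## mcl_fp_shr1_5Lbmi2: shifts a 5-limb value right by one bit using a shrdq chain across limbs.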
+ .globl _mcl_fp_shr1_5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_shr1_5Lbmi2: ## @mcl_fp_shr1_5Lbmi2
+## BB#0:
+ movq 32(%rsi), %r8
+ movq 24(%rsi), %rcx
+ movq 16(%rsi), %rdx
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rax
+ movq %rax, (%rdi)
+ shrdq $1, %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 16(%rdi)
+ shrdq $1, %r8, %rcx
+ movq %rcx, 24(%rdi)
+ shrq %r8
+ movq %r8, 32(%rdi)
+ retq
+
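+## mcl_fp_add5Lbmi2: modular addition; the raw sum is stored first, then overwritten with
+## sum - modulus when the trial subtraction does not borrow (nocarry path).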
+ .globl _mcl_fp_add5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_add5Lbmi2: ## @mcl_fp_add5Lbmi2
+## BB#0:
+ pushq %rbx
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %rbx
+ movq 24(%rsi), %r9
+ movq 32(%rsi), %r8
+ movq 16(%rdx), %r10
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r10
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ adcq %rbx, %r9
+ movq %r9, 24(%rdi)
+ adcq %r11, %r8
+ movq %r8, 32(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r10
+ sbbq 24(%rcx), %r9
+ sbbq 32(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB74_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %r9, 24(%rdi)
+ movq %r8, 32(%rdi)
+LBB74_2: ## %carry
+ popq %rbx
+ retq
+
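+## mcl_fp_addNF5Lbmi2: branch-free modular addition; cmovs keeps the unreduced sum whenever
+## subtracting the modulus underflows.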
+ .globl _mcl_fp_addNF5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addNF5Lbmi2: ## @mcl_fp_addNF5Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 32(%rdx), %r8
+ movq 24(%rdx), %r9
+ movq 16(%rdx), %r10
+ movq (%rdx), %r14
+ movq 8(%rdx), %r11
+ addq (%rsi), %r14
+ adcq 8(%rsi), %r11
+ adcq 16(%rsi), %r10
+ adcq 24(%rsi), %r9
+ adcq 32(%rsi), %r8
+ movq %r14, %rsi
+ subq (%rcx), %rsi
+ movq %r11, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r10, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r9, %r15
+ sbbq 24(%rcx), %r15
+ movq %r8, %rax
+ sbbq 32(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r14, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ cmovsq %r9, %r15
+ movq %r15, 24(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 32(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
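+## mcl_fp_sub5Lbmi2: modular subtraction; the modulus is added back on the carry path when a - b borrows.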
+ .globl _mcl_fp_sub5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sub5Lbmi2: ## @mcl_fp_sub5Lbmi2
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 32(%rsi), %r8
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r10, 16(%rdi)
+ sbbq %r11, %r9
+ movq %r9, 24(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 32(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB76_2
+## BB#1: ## %carry
+ movq 32(%rcx), %r11
+ movq 24(%rcx), %r14
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rbx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %rsi, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r9, %r14
+ movq %r14, 24(%rdi)
+ adcq %r8, %r11
+ movq %r11, 32(%rdi)
+LBB76_2: ## %nocarry
+ popq %rbx
+ popq %r14
+ retq
+
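+## mcl_fp_subNF5Lbmi2: branch-free modular subtraction; the sign of a - b is turned into a mask
+## that selects either zero or the modulus to add back.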
+ .globl _mcl_fp_subNF5Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subNF5Lbmi2: ## @mcl_fp_subNF5Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 32(%rsi), %r12
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r9
+ movdqu (%rsi), %xmm2
+ movdqu 16(%rsi), %xmm3
+ pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1]
+ movd %xmm4, %r8
+ movd %xmm1, %r10
+ movd %xmm3, %r14
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %r11
+ pshufd $78, %xmm2, %xmm1 ## xmm1 = xmm2[2,3,0,1]
+ movd %xmm1, %r15
+ movd %xmm0, %rsi
+ movd %xmm2, %r13
+ subq %rsi, %r13
+ sbbq %r11, %r15
+ sbbq %r10, %r14
+ sbbq %r9, %r8
+ sbbq 32(%rdx), %r12
+ movq %r12, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rsi
+ shldq $1, %r12, %rsi
+ movq 8(%rcx), %rax
+ andq %rsi, %rax
+ andq (%rcx), %rsi
+ movq 32(%rcx), %r9
+ andq %rdx, %r9
+ rorxq $63, %rdx, %rbx
+ andq 24(%rcx), %rdx
+ andq 16(%rcx), %rbx
+ addq %r13, %rsi
+ movq %rsi, (%rdi)
+ adcq %r15, %rax
+ movq %rax, 8(%rdi)
+ adcq %r14, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r8, %rdx
+ movq %rdx, 24(%rdi)
+ adcq %r12, %r9
+ movq %r9, 32(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
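+## mcl_fpDbl_add5Lbmi2: adds two 10-limb (double-width) values; the low half is stored directly
+## and the high half is conditionally reduced by the modulus.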
+ .globl _mcl_fpDbl_add5Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_add5Lbmi2: ## @mcl_fpDbl_add5Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 72(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 64(%rdx), %r11
+ movq 56(%rdx), %r14
+ movq 48(%rdx), %r15
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %r13
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %rbp
+ adcq 32(%rdx), %r13
+ movq 40(%rdx), %r9
+ movq %rbx, (%rdi)
+ movq 72(%rsi), %r8
+ movq %rax, 8(%rdi)
+ movq 64(%rsi), %r10
+ movq %r12, 16(%rdi)
+ movq 56(%rsi), %r12
+ movq %rbp, 24(%rdi)
+ movq 48(%rsi), %rbp
+ movq 40(%rsi), %rbx
+ movq %r13, 32(%rdi)
+ adcq %r9, %rbx
+ adcq %r15, %rbp
+ adcq %r14, %r12
+ adcq %r11, %r10
+ adcq -8(%rsp), %r8 ## 8-byte Folded Reload
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rbx, %rax
+ subq (%rcx), %rax
+ movq %rbp, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r12, %r9
+ sbbq 16(%rcx), %r9
+ movq %r10, %r11
+ sbbq 24(%rcx), %r11
+ movq %r8, %r14
+ sbbq 32(%rcx), %r14
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rbx, %rax
+ movq %rax, 40(%rdi)
+ testb %sil, %sil
+ cmovneq %rbp, %rdx
+ movq %rdx, 48(%rdi)
+ cmovneq %r12, %r9
+ movq %r9, 56(%rdi)
+ cmovneq %r10, %r11
+ movq %r11, 64(%rdi)
+ cmovneq %r8, %r14
+ movq %r14, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
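+## mcl_fpDbl_sub5Lbmi2: subtracts two 10-limb values; a masked copy of the modulus is added to the
+## high half when the subtraction borrows.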
+ .globl _mcl_fpDbl_sub5Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sub5Lbmi2: ## @mcl_fpDbl_sub5Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 72(%rdx), %r9
+ movq 64(%rdx), %r10
+ movq 56(%rdx), %r14
+ movq 16(%rsi), %r8
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %eax, %eax
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r8
+ movq 24(%rsi), %r12
+ sbbq 24(%rdx), %r12
+ movq %r15, (%rdi)
+ movq 32(%rsi), %rbx
+ sbbq 32(%rdx), %rbx
+ movq %r11, 8(%rdi)
+ movq 48(%rdx), %r15
+ movq 40(%rdx), %rdx
+ movq %r8, 16(%rdi)
+ movq 72(%rsi), %r8
+ movq %r12, 24(%rdi)
+ movq 64(%rsi), %r11
+ movq %rbx, 32(%rdi)
+ movq 40(%rsi), %rbp
+ sbbq %rdx, %rbp
+ movq 56(%rsi), %r12
+ movq 48(%rsi), %r13
+ sbbq %r15, %r13
+ sbbq %r14, %r12
+ sbbq %r10, %r11
+ sbbq %r9, %r8
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ movq 16(%rcx), %rdx
+ cmoveq %rax, %rdx
+ movq 8(%rcx), %rbx
+ cmoveq %rax, %rbx
+ movq 32(%rcx), %r9
+ cmoveq %rax, %r9
+ cmovneq 24(%rcx), %rax
+ addq %rbp, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r13, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r12, %rdx
+ movq %rdx, 56(%rdi)
+ adcq %r11, %rax
+ movq %rax, 64(%rdi)
+ adcq %r8, %r9
+ movq %r9, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
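+## mcl_fp_mulUnitPre6Lbmi2: multiplies a 6-limb operand by the 64-bit word in %rdx, storing the
+## 7-limb product via a mulx chain.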
+ .globl _mcl_fp_mulUnitPre6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre6Lbmi2: ## @mcl_fp_mulUnitPre6Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ mulxq 40(%rsi), %r8, %r11
+ mulxq 32(%rsi), %r9, %r12
+ mulxq 24(%rsi), %r10, %rcx
+ mulxq 16(%rsi), %r14, %rbx
+ mulxq 8(%rsi), %r15, %rax
+ mulxq (%rsi), %rdx, %rsi
+ movq %rdx, (%rdi)
+ addq %r15, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r14, %rax
+ movq %rax, 16(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r9, %rcx
+ movq %rcx, 32(%rdi)
+ adcq %r8, %r12
+ movq %r12, 40(%rdi)
+ adcq $0, %r11
+ movq %r11, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
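+## mcl_fpDbl_mulPre6Lbmi2: schoolbook 6x6-limb multiplication; one mulx row per multiplier word,
+## producing the full 12-limb product with no reduction.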
+ .globl _mcl_fpDbl_mulPre6Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre6Lbmi2: ## @mcl_fpDbl_mulPre6Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r11
+ movq %rdi, -48(%rsp) ## 8-byte Spill
+ movq (%rsi), %r15
+ movq 8(%rsi), %rcx
+ movq %rcx, -80(%rsp) ## 8-byte Spill
+ movq (%r11), %rax
+ movq %r11, -56(%rsp) ## 8-byte Spill
+ movq %rcx, %rdx
+ mulxq %rax, %rcx, %r14
+ movq %r15, %rdx
+ mulxq %rax, %rdx, %rbp
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq 24(%rsi), %rbx
+ movq %rbx, -88(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ addq %rcx, %rbp
+ mulxq %rax, %rcx, %r12
+ adcq %r14, %rcx
+ movq %rbx, %rdx
+ mulxq %rax, %rbx, %r14
+ adcq %r12, %rbx
+ movq 32(%rsi), %r12
+ movq %r12, %rdx
+ mulxq %rax, %r8, %r13
+ adcq %r14, %r8
+ movq 40(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rax, %r9, %r10
+ adcq %r13, %r9
+ movq -72(%rsp), %rax ## 8-byte Reload
+ movq %rax, (%rdi)
+ adcq $0, %r10
+ movq 8(%r11), %rdi
+ movq %r15, %rdx
+ mulxq %rdi, %r13, %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ addq %rbp, %r13
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rbp, %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ adcq %rcx, %rbp
+ movq -64(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rax, %r11
+ adcq %rbx, %rax
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rbx, %rcx
+ movq %rcx, -88(%rsp) ## 8-byte Spill
+ adcq %r8, %rbx
+ movq %r12, %rdx
+ mulxq %rdi, %rcx, %r8
+ adcq %r9, %rcx
+ movq %r14, %rdx
+ mulxq %rdi, %r12, %rdx
+ adcq %r10, %r12
+ sbbq %r15, %r15
+ andl $1, %r15d
+ addq -72(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -80(%rsp), %rax ## 8-byte Folded Reload
+ adcq %r11, %rbx
+ movq -48(%rsp), %rdi ## 8-byte Reload
+ movq %r13, 8(%rdi)
+ adcq -88(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r8, %r12
+ adcq %rdx, %r15
+ movq (%rsi), %rdx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %r8
+ movq %r8, -80(%rsp) ## 8-byte Spill
+ movq -56(%rsp), %r14 ## 8-byte Reload
+ movq 16(%r14), %rdi
+ mulxq %rdi, %r13, %rdx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ addq %rbp, %r13
+ movq %r8, %rdx
+ mulxq %rdi, %r8, %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ adcq %rax, %r8
+ movq 16(%rsi), %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ mulxq %rdi, %r11, %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ adcq %rbx, %r11
+ movq 24(%rsi), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ mulxq %rdi, %rax, %rbx
+ adcq %rcx, %rax
+ movq 32(%rsi), %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ mulxq %rdi, %r10, %rcx
+ adcq %r12, %r10
+ movq 40(%rsi), %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ mulxq %rdi, %r9, %rdx
+ adcq %r15, %r9
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq -8(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -16(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -24(%rsp), %rax ## 8-byte Folded Reload
+ adcq %rbx, %r10
+ adcq %rcx, %r9
+ adcq %rdx, %rbp
+ movq -48(%rsp), %rcx ## 8-byte Reload
+ movq %r13, 16(%rcx)
+ movq 24(%r14), %rdi
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r12, %rcx
+ movq %rcx, -88(%rsp) ## 8-byte Spill
+ addq %r8, %r12
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rbx, %rcx
+ movq %rcx, -80(%rsp) ## 8-byte Spill
+ adcq %r11, %rbx
+ movq -64(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rcx, %r11
+ adcq %rax, %rcx
+ movq -72(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r14, %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ adcq %r10, %r14
+ movq -32(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r8, %rax
+ adcq %r9, %r8
+ movq -40(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r13, %rdx
+ adcq %rbp, %r13
+ sbbq %r15, %r15
+ andl $1, %r15d
+ addq -88(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -80(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r11, %r14
+ movq -48(%rsp), %rdi ## 8-byte Reload
+ movq %r12, 24(%rdi)
+ adcq -64(%rsp), %r8 ## 8-byte Folded Reload
+ adcq %rax, %r13
+ adcq %rdx, %r15
+ movq (%rsi), %rdx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rbp
+ movq %rbp, -80(%rsp) ## 8-byte Spill
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdi
+ mulxq %rdi, %r12, %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ addq %rbx, %r12
+ movq %rbp, %rdx
+ mulxq %rdi, %rbx, %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ adcq %rcx, %rbx
+ movq 16(%rsi), %r11
+ movq %r11, %rdx
+ mulxq %rdi, %rax, %rcx
+ movq %rcx, -32(%rsp) ## 8-byte Spill
+ adcq %r14, %rax
+ movq 24(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rdi, %rbp, %rcx
+ movq %rcx, -40(%rsp) ## 8-byte Spill
+ adcq %r8, %rbp
+ movq 32(%rsi), %r8
+ movq %r8, %rdx
+ mulxq %rdi, %rcx, %r10
+ adcq %r13, %rcx
+ movq 40(%rsi), %r13
+ movq %r13, %rdx
+ mulxq %rdi, %r9, %rdx
+ adcq %r15, %r9
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq -64(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -72(%rsp), %rax ## 8-byte Folded Reload
+ adcq -32(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -40(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r10, %r9
+ adcq %rdx, %rsi
+ movq -48(%rsp), %r10 ## 8-byte Reload
+ movq %r12, 32(%r10)
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ movq 40(%rdx), %rdi
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r15, %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ addq %rbx, %r15
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rbx, %r12
+ adcq %rax, %rbx
+ movq %r11, %rdx
+ mulxq %rdi, %rax, %r11
+ adcq %rbp, %rax
+ movq %r14, %rdx
+ mulxq %rdi, %rbp, %r14
+ adcq %rcx, %rbp
+ movq %r8, %rdx
+ mulxq %rdi, %rcx, %r8
+ adcq %r9, %rcx
+ movq %r13, %rdx
+ mulxq %rdi, %rdi, %r9
+ adcq %rsi, %rdi
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq -56(%rsp), %rbx ## 8-byte Folded Reload
+ movq %r15, 40(%r10)
+ movq %rbx, 48(%r10)
+ adcq %r12, %rax
+ movq %rax, 56(%r10)
+ adcq %r11, %rbp
+ movq %rbp, 64(%r10)
+ adcq %r14, %rcx
+ movq %rcx, 72(%r10)
+ adcq %r8, %rdi
+ movq %rdi, 80(%r10)
+ adcq %r9, %rsi
+ movq %rsi, 88(%r10)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
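+## mcl_fpDbl_sqrPre6Lbmi2: 6-limb squaring; computes the full 12-limb square with the same
+## row-by-row structure as the multiply, reusing the single operand for both inputs.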
+ .globl _mcl_fpDbl_sqrPre6Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre6Lbmi2: ## @mcl_fpDbl_sqrPre6Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, %r9
+ movq 16(%rsi), %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rax
+ mulxq %rcx, %r10, %r8
+ movq 24(%rsi), %rbp
+ movq %rbp, -48(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ mulxq %rcx, %r11, %rbx
+ movq %rbx, -40(%rsp) ## 8-byte Spill
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %r14
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ addq %r11, %r14
+ adcq %rbx, %r10
+ movq %rbp, %rdx
+ mulxq %rcx, %r15, %rbp
+ adcq %r8, %r15
+ movq 32(%rsi), %rbx
+ movq %rbx, %rdx
+ mulxq %rcx, %r8, %r13
+ adcq %rbp, %r8
+ movq 40(%rsi), %rdi
+ movq %rdi, %rdx
+ mulxq %rcx, %rcx, %r12
+ adcq %r13, %rcx
+ movq %r9, -24(%rsp) ## 8-byte Spill
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ movq %rdx, (%r9)
+ adcq $0, %r12
+ addq %r11, %r14
+ movq %rax, %rdx
+ mulxq %rax, %rbp, %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ adcq %r10, %rbp
+ movq -64(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r13, %r10
+ adcq %r15, %r13
+ movq -48(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r15, %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ adcq %r8, %r15
+ movq %rbx, %rdx
+ mulxq %rax, %rbx, %r8
+ adcq %rcx, %rbx
+ movq %rdi, %rdx
+ mulxq %rax, %r11, %rax
+ adcq %r12, %r11
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -40(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -56(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r14, 8(%r9)
+ adcq %r10, %r15
+ adcq -64(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %r8, %r11
+ adcq %rax, %r12
+ movq (%rsi), %rdx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rdi
+ movq %rdi, -64(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %rcx
+ mulxq %rcx, %rax, %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ addq %rbp, %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq %rdi, %rdx
+ mulxq %rcx, %rbp, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ adcq %r13, %rbp
+ movq %rcx, %rdx
+ mulxq %rcx, %r13, %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ adcq %r15, %r13
+ movq 24(%rsi), %rax
+ movq %rax, %rdx
+ mulxq %rcx, %r8, %rdi
+ movq %rdi, -56(%rsp) ## 8-byte Spill
+ adcq %r8, %rbx
+ movq 32(%rsi), %r10
+ movq %r10, %rdx
+ mulxq %rcx, %r14, %r15
+ adcq %r11, %r14
+ movq 40(%rsi), %r11
+ movq %r11, %rdx
+ mulxq %rcx, %r9, %rdx
+ adcq %r12, %r9
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq -32(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -8(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -16(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %rdi, %r14
+ adcq %r15, %r9
+ adcq %rdx, %rcx
+ movq -48(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %rdi, %rdx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ addq %rbp, %rdi
+ movq -64(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r15, %rbp
+ adcq %r13, %r15
+ adcq %r8, %rbx
+ movq %rax, %rdx
+ mulxq %rax, %r8, %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ adcq %r14, %r8
+ movq %r10, %rdx
+ mulxq %rax, %r12, %r10
+ adcq %r9, %r12
+ movq %r11, %rdx
+ mulxq %rax, %r13, %rax
+ adcq %rcx, %r13
+ sbbq %r9, %r9
+ andl $1, %r9d
+ addq -48(%rsp), %r15 ## 8-byte Folded Reload
+ adcq %rbp, %rbx
+ movq -24(%rsp), %rdx ## 8-byte Reload
+ movq -40(%rsp), %rbp ## 8-byte Reload
+ movq %rbp, 16(%rdx)
+ movq %rdi, 24(%rdx)
+ adcq -56(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -64(%rsp), %r12 ## 8-byte Folded Reload
+ adcq %r10, %r13
+ adcq %rax, %r9
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rdi
+ movq %rdi, -64(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rcx, %rdx
+ mulxq %rax, %rdx, %rbp
+ movq %rbp, -56(%rsp) ## 8-byte Spill
+ addq %r15, %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rdi, %rdx
+ mulxq %rax, %r15, %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ adcq %rbx, %r15
+ movq 16(%rsi), %r10
+ movq %r10, %rdx
+ mulxq %rax, %r14, %rbx
+ adcq %r8, %r14
+ movq 24(%rsi), %r8
+ movq %r8, %rdx
+ mulxq %rax, %rbp, %rdi
+ adcq %r12, %rbp
+ movq %rax, %rdx
+ mulxq %rax, %r11, %r12
+ adcq %r13, %r11
+ movq 40(%rsi), %rsi
+ movq %rsi, %rdx
+ mulxq %rax, %r13, %rdx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ adcq %r13, %r9
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq -56(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -32(%rsp), %r14 ## 8-byte Folded Reload
+ adcq %rbx, %rbp
+ adcq %rdi, %r11
+ adcq %r12, %r9
+ adcq %rdx, %rax
+ movq %rcx, %rdx
+ mulxq %rsi, %r12, %rcx
+ addq %r15, %r12
+ movq -64(%rsp), %rdx ## 8-byte Reload
+ mulxq %rsi, %rdi, %r15
+ adcq %r14, %rdi
+ movq %r10, %rdx
+ mulxq %rsi, %rbx, %r10
+ adcq %rbp, %rbx
+ movq %r8, %rdx
+ mulxq %rsi, %rbp, %r8
+ adcq %r11, %rbp
+ adcq %r13, %r9
+ movq %rsi, %rdx
+ mulxq %rsi, %rsi, %r11
+ adcq %rax, %rsi
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rcx, %rdi
+ movq -24(%rsp), %rdx ## 8-byte Reload
+ movq -40(%rsp), %rcx ## 8-byte Reload
+ movq %rcx, 32(%rdx)
+ movq %r12, 40(%rdx)
+ movq %rdi, 48(%rdx)
+ adcq %r15, %rbx
+ movq %rbx, 56(%rdx)
+ adcq %r10, %rbp
+ movq %rbp, 64(%rdx)
+ adcq %r8, %r9
+ movq %r9, 72(%rdx)
+ adcq -48(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 80(%rdx)
+ adcq %r11, %rax
+ movq %rax, 88(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
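+## mcl_fp_mont6Lbmi2: 6-limb Montgomery multiplication; each multiplier word is interleaved with a
+## reduction step, and a final conditional subtraction of the modulus keeps the result in range.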
+ .globl _mcl_fp_mont6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mont6Lbmi2: ## @mcl_fp_mont6Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $32, %rsp
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rdi, 24(%rsp) ## 8-byte Spill
+ movq 40(%rsi), %rdi
+ movq %rdi, -96(%rsp) ## 8-byte Spill
+ movq (%rdx), %rax
+ movq %rdi, %rdx
+ mulxq %rax, %r11, %rbx
+ movq 32(%rsi), %rdx
+ movq %rdx, (%rsp) ## 8-byte Spill
+ mulxq %rax, %r14, %r12
+ movq 24(%rsi), %rdx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ mulxq %rax, %r15, %r13
+ movq 16(%rsi), %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ mulxq %rax, %r8, %r10
+ movq (%rsi), %rbp
+ movq %rbp, -24(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ mulxq %rax, %rdi, %r9
+ movq %rbp, %rdx
+ mulxq %rax, %rdx, %rbp
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ addq %rdi, %rbp
+ adcq %r8, %r9
+ adcq %r15, %r10
+ adcq %r14, %r13
+ adcq %r11, %r12
+ adcq $0, %rbx
+ movq %rbx, -120(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ imulq %rax, %rdx
+ movq 40(%rcx), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ mulxq %rax, %rax, %r15
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ mulxq %rax, %r8, %rax
+ movq 8(%rcx), %rsi
+ movq %rsi, -56(%rsp) ## 8-byte Spill
+ mulxq %rsi, %rbx, %r11
+ movq (%rcx), %rsi
+ movq %rsi, -64(%rsp) ## 8-byte Spill
+ mulxq %rsi, %rsi, %r14
+ addq %rbx, %r14
+ adcq %r8, %r11
+ movq 24(%rcx), %rdi
+ movq %rdi, -72(%rsp) ## 8-byte Spill
+ mulxq %rdi, %rdi, %r8
+ adcq %rax, %rdi
+ movq 32(%rcx), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ mulxq %rax, %rbx, %rax
+ adcq %r8, %rbx
+ adcq -112(%rsp), %rax ## 8-byte Folded Reload
+ adcq $0, %r15
+ addq -128(%rsp), %rsi ## 8-byte Folded Reload
+ adcq %rbp, %r14
+ adcq %r9, %r11
+ adcq %r10, %rdi
+ adcq %r13, %rbx
+ adcq %r12, %rax
+ adcq -120(%rsp), %r15 ## 8-byte Folded Reload
+ sbbq %r10, %r10
+ andl $1, %r10d
+ movq -88(%rsp), %rcx ## 8-byte Reload
+ movq 8(%rcx), %rdx
+ mulxq -96(%rsp), %rsi, %rcx ## 8-byte Folded Reload
+ movq %rsi, -112(%rsp) ## 8-byte Spill
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ mulxq (%rsp), %rcx, %r13 ## 8-byte Folded Reload
+ movq %rcx, -104(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %r12, %rcx ## 8-byte Folded Reload
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %rbp, %rcx ## 8-byte Folded Reload
+ mulxq -24(%rsp), %rsi, %r9 ## 8-byte Folded Reload
+ addq %rbp, %r9
+ mulxq -16(%rsp), %rbp, %r8 ## 8-byte Folded Reload
+ adcq %rcx, %rbp
+ adcq %r12, %r8
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ adcq -112(%rsp), %r13 ## 8-byte Folded Reload
+ movq -120(%rsp), %rcx ## 8-byte Reload
+ adcq $0, %rcx
+ addq %r14, %rsi
+ adcq %r11, %r9
+ adcq %rdi, %rbp
+ adcq %rbx, %r8
+ adcq %rax, %rdx
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq %r15, %r13
+ adcq %r10, %rcx
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rsi, %rbx
+ movq %rbx, %rdx
+ imulq 8(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rax, %r12 ## 8-byte Folded Reload
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulxq -80(%rsp), %r14, %r11 ## 8-byte Folded Reload
+ mulxq -56(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rdi, %rsi ## 8-byte Folded Reload
+ addq %rcx, %rsi
+ mulxq -48(%rsp), %rcx, %r10 ## 8-byte Folded Reload
+ adcq %rax, %rcx
+ mulxq -72(%rsp), %rax, %r15 ## 8-byte Folded Reload
+ adcq %r10, %rax
+ adcq %r14, %r15
+ adcq -104(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %r12
+ addq %rbx, %rdi
+ adcq %r9, %rsi
+ adcq %rbp, %rcx
+ adcq %r8, %rax
+ adcq -128(%rsp), %r15 ## 8-byte Folded Reload
+ adcq %r13, %r11
+ adcq -120(%rsp), %r12 ## 8-byte Folded Reload
+ movq -112(%rsp), %r10 ## 8-byte Reload
+ adcq $0, %r10
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq -96(%rsp), %rbp, %rdi ## 8-byte Folded Reload
+ movq %rbp, -112(%rsp) ## 8-byte Spill
+ movq %rdi, -120(%rsp) ## 8-byte Spill
+ mulxq (%rsp), %rdi, %rbp ## 8-byte Folded Reload
+ movq %rdi, -104(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %rdi, %r13 ## 8-byte Folded Reload
+ movq %rdi, 16(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %rdi, %r14 ## 8-byte Folded Reload
+ mulxq -24(%rsp), %rbx, %r9 ## 8-byte Folded Reload
+ movq %rbx, -128(%rsp) ## 8-byte Spill
+ addq %rdi, %r9
+ mulxq -16(%rsp), %rbx, %r8 ## 8-byte Folded Reload
+ adcq %r14, %rbx
+ adcq 16(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rbp ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ movq -128(%rsp), %rdi ## 8-byte Reload
+ addq %rsi, %rdi
+ movq %rdi, -128(%rsp) ## 8-byte Spill
+ adcq %rcx, %r9
+ adcq %rax, %rbx
+ adcq %r15, %r8
+ adcq %r11, %r13
+ adcq %r12, %rbp
+ adcq %r10, %rdx
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rdi, %rdx
+ imulq 8(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rax, %r11 ## 8-byte Folded Reload
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulxq -80(%rsp), %r15, %r12 ## 8-byte Folded Reload
+ mulxq -56(%rsp), %rax, %rcx ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rdi, %r14 ## 8-byte Folded Reload
+ addq %rax, %r14
+ mulxq -48(%rsp), %rax, %r10 ## 8-byte Folded Reload
+ adcq %rcx, %rax
+ mulxq -72(%rsp), %rsi, %rcx ## 8-byte Folded Reload
+ adcq %r10, %rsi
+ adcq %r15, %rcx
+ adcq -104(%rsp), %r12 ## 8-byte Folded Reload
+ adcq $0, %r11
+ addq -128(%rsp), %rdi ## 8-byte Folded Reload
+ adcq %r9, %r14
+ adcq %rbx, %rax
+ adcq %r8, %rsi
+ adcq %r13, %rcx
+ adcq %rbp, %r12
+ adcq -120(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, -112(%rsp) ## 8-byte Folded Spill
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ movq 24(%rdx), %rdx
+ mulxq -96(%rsp), %rbp, %rdi ## 8-byte Folded Reload
+ movq %rbp, -128(%rsp) ## 8-byte Spill
+ movq %rdi, -120(%rsp) ## 8-byte Spill
+ mulxq (%rsp), %rdi, %r15 ## 8-byte Folded Reload
+ movq %rdi, -104(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %r10, %rbp ## 8-byte Folded Reload
+ mulxq -32(%rsp), %rbx, %r9 ## 8-byte Folded Reload
+ mulxq -24(%rsp), %r13, %rdi ## 8-byte Folded Reload
+ addq %rbx, %rdi
+ mulxq -16(%rsp), %rbx, %r8 ## 8-byte Folded Reload
+ adcq %r9, %rbx
+ adcq %r10, %r8
+ adcq -104(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -128(%rsp), %r15 ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r14, %r13
+ adcq %rax, %rdi
+ adcq %rsi, %rbx
+ adcq %rcx, %r8
+ adcq %r12, %rbp
+ adcq %r11, %r15
+ adcq -112(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %r13, %rdx
+ imulq 8(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rax, %r10 ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulxq -80(%rsp), %rax, %r12 ## 8-byte Folded Reload
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulxq -56(%rsp), %rax, %r11 ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rcx, %rsi ## 8-byte Folded Reload
+ addq %rax, %rsi
+ mulxq -48(%rsp), %r14, %r9 ## 8-byte Folded Reload
+ adcq %r11, %r14
+ mulxq -72(%rsp), %rax, %r11 ## 8-byte Folded Reload
+ adcq %r9, %rax
+ adcq -104(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r12 ## 8-byte Folded Reload
+ adcq $0, %r10
+ addq %r13, %rcx
+ adcq %rdi, %rsi
+ adcq %rbx, %r14
+ adcq %r8, %rax
+ adcq %rbp, %r11
+ adcq %r15, %r12
+ adcq -120(%rsp), %r10 ## 8-byte Folded Reload
+ movq -128(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ movq -88(%rsp), %rcx ## 8-byte Reload
+ movq 32(%rcx), %rdx
+ mulxq -96(%rsp), %rdi, %rcx ## 8-byte Folded Reload
+ movq %rdi, -112(%rsp) ## 8-byte Spill
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ mulxq (%rsp), %rdi, %rcx ## 8-byte Folded Reload
+ movq %rdi, 16(%rsp) ## 8-byte Spill
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %r13, %rbp ## 8-byte Folded Reload
+ mulxq -32(%rsp), %rdi, %rcx ## 8-byte Folded Reload
+ mulxq -24(%rsp), %rbx, %r8 ## 8-byte Folded Reload
+ movq %rbx, -104(%rsp) ## 8-byte Spill
+ addq %rdi, %r8
+ mulxq -16(%rsp), %rbx, %r9 ## 8-byte Folded Reload
+ adcq %rcx, %rbx
+ adcq %r13, %r9
+ adcq 16(%rsp), %rbp ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq -112(%rsp), %rdx ## 8-byte Folded Reload
+ movq -120(%rsp), %rcx ## 8-byte Reload
+ adcq $0, %rcx
+ movq -104(%rsp), %rdi ## 8-byte Reload
+ addq %rsi, %rdi
+ movq %rdi, -104(%rsp) ## 8-byte Spill
+ adcq %r14, %r8
+ adcq %rax, %rbx
+ adcq %r11, %r9
+ adcq %r12, %rbp
+ adcq %r10, %rdx
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq %r15, %rcx
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, %r13
+ movq %rdi, %rdx
+ imulq 8(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -40(%rsp), %r14, %rax ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulxq -80(%rsp), %r12, %r15 ## 8-byte Folded Reload
+ mulxq -56(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rdi, %rsi ## 8-byte Folded Reload
+ addq %rcx, %rsi
+ mulxq -48(%rsp), %r11, %r10 ## 8-byte Folded Reload
+ adcq %rax, %r11
+ mulxq -72(%rsp), %rax, %rcx ## 8-byte Folded Reload
+ adcq %r10, %rax
+ adcq %r12, %rcx
+ adcq %r14, %r15
+ movq -112(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq -104(%rsp), %rdi ## 8-byte Folded Reload
+ adcq %r8, %rsi
+ adcq %rbx, %r11
+ adcq %r9, %rax
+ adcq %rbp, %rcx
+ adcq -128(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ adcq $0, %r13
+ movq %r13, -120(%rsp) ## 8-byte Spill
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ movq 40(%rdx), %rdx
+ mulxq -96(%rsp), %rbp, %rdi ## 8-byte Folded Reload
+ movq %rbp, -128(%rsp) ## 8-byte Spill
+ movq %rdi, -88(%rsp) ## 8-byte Spill
+ mulxq (%rsp), %rbx, %rdi ## 8-byte Folded Reload
+ movq %rdi, -96(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %r10, %rbp ## 8-byte Folded Reload
+ mulxq -16(%rsp), %r8, %r12 ## 8-byte Folded Reload
+ mulxq -32(%rsp), %rdi, %r14 ## 8-byte Folded Reload
+ mulxq -24(%rsp), %r13, %r9 ## 8-byte Folded Reload
+ addq %rdi, %r9
+ adcq %r8, %r14
+ adcq %r10, %r12
+ adcq %rbx, %rbp
+ movq -96(%rsp), %rdi ## 8-byte Reload
+ adcq -128(%rsp), %rdi ## 8-byte Folded Reload
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %rsi, %r13
+ adcq %r11, %r9
+ adcq %rax, %r14
+ adcq %rcx, %r12
+ adcq %r15, %rbp
+ adcq -112(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -96(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ sbbq %rcx, %rcx
+ movq 8(%rsp), %rdx ## 8-byte Reload
+ imulq %r13, %rdx
+ mulxq -64(%rsp), %r8, %rax ## 8-byte Folded Reload
+ mulxq -56(%rsp), %r10, %rdi ## 8-byte Folded Reload
+ addq %rax, %r10
+ mulxq -48(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -72(%rsp), %rbx, %r11 ## 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -80(%rsp), %rdi, %r15 ## 8-byte Folded Reload
+ adcq %r11, %rdi
+ mulxq -40(%rsp), %rax, %r11 ## 8-byte Folded Reload
+ adcq %r15, %rax
+ adcq $0, %r11
+ andl $1, %ecx
+ addq %r13, %r8
+ adcq %r9, %r10
+ adcq %r14, %rsi
+ adcq %r12, %rbx
+ adcq %rbp, %rdi
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ adcq -88(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %rcx
+ movq %r10, %rbp
+ subq -64(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rsi, %rdx
+ sbbq -56(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rbx, %r8
+ sbbq -48(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rdi, %r9
+ sbbq -72(%rsp), %r9 ## 8-byte Folded Reload
+ movq %rax, %r14
+ sbbq -80(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r11, %r15
+ sbbq -40(%rsp), %r15 ## 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rdi, %r9
+ testb %cl, %cl
+ cmovneq %r10, %rbp
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ movq %rbp, (%rcx)
+ cmovneq %rsi, %rdx
+ movq %rdx, 8(%rcx)
+ cmovneq %rbx, %r8
+ movq %r8, 16(%rcx)
+ movq %r9, 24(%rcx)
+ cmovneq %rax, %r14
+ movq %r14, 32(%rcx)
+ cmovneq %r11, %r15
+ movq %r15, 40(%rcx)
+ addq $32, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
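+## mcl_fp_montNF6Lbmi2: 6-limb Montgomery multiplication (NF variant); same interleaved
+## multiply/reduce structure, but the final correction uses a signed select (cmovs) instead of a carry test.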
+ .globl _mcl_fp_montNF6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montNF6Lbmi2: ## @mcl_fp_montNF6Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq (%rsi), %rax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rdi
+ movq %rdi, -128(%rsp) ## 8-byte Spill
+ movq (%rdx), %rbp
+ movq %rdi, %rdx
+ mulxq %rbp, %rdi, %rbx
+ movq %rax, %rdx
+ mulxq %rbp, %r9, %r14
+ movq 16(%rsi), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ addq %rdi, %r14
+ mulxq %rbp, %rdi, %r8
+ adcq %rbx, %rdi
+ movq 24(%rsi), %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ mulxq %rbp, %rbx, %r10
+ adcq %r8, %rbx
+ movq 32(%rsi), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ mulxq %rbp, %r8, %r11
+ adcq %r10, %r8
+ movq 40(%rsi), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ mulxq %rbp, %rsi, %r15
+ adcq %r11, %rsi
+ adcq $0, %r15
+ movq -8(%rcx), %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %r9, %rdx
+ imulq %rax, %rdx
+ movq (%rcx), %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ mulxq %rax, %rbp, %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ addq %r9, %rbp
+ movq 8(%rcx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ mulxq %rax, %r12, %r9
+ adcq %r14, %r12
+ movq 16(%rcx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ mulxq %rax, %r14, %rax
+ adcq %rdi, %r14
+ movq 24(%rcx), %rdi
+ movq %rdi, -32(%rsp) ## 8-byte Spill
+ mulxq %rdi, %r13, %rdi
+ adcq %rbx, %r13
+ movq 32(%rcx), %rbp
+ movq %rbp, -40(%rsp) ## 8-byte Spill
+ mulxq %rbp, %r11, %rbx
+ adcq %r8, %r11
+ movq 40(%rcx), %rcx
+ movq %rcx, -48(%rsp) ## 8-byte Spill
+ mulxq %rcx, %r10, %rcx
+ adcq %rsi, %r10
+ adcq $0, %r15
+ addq -88(%rsp), %r12 ## 8-byte Folded Reload
+ adcq %r9, %r14
+ adcq %rax, %r13
+ adcq %rdi, %r11
+ adcq %rbx, %r10
+ adcq %rcx, %r15
+ movq -120(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ mulxq -128(%rsp), %rcx, %rsi ## 8-byte Folded Reload
+ mulxq -112(%rsp), %rbx, %rax ## 8-byte Folded Reload
+ addq %rcx, %rax
+ mulxq -56(%rsp), %rcx, %rdi ## 8-byte Folded Reload
+ adcq %rsi, %rcx
+ mulxq -64(%rsp), %rsi, %r8 ## 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -72(%rsp), %rdi, %rbp ## 8-byte Folded Reload
+ movq %rbp, -88(%rsp) ## 8-byte Spill
+ adcq %r8, %rdi
+ mulxq -80(%rsp), %r8, %r9 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r8 ## 8-byte Folded Reload
+ adcq $0, %r9
+ addq %r12, %rbx
+ adcq %r14, %rax
+ adcq %r13, %rcx
+ adcq %r11, %rsi
+ adcq %r10, %rdi
+ adcq %r15, %r8
+ adcq $0, %r9
+ movq %rbx, %rdx
+ imulq -104(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -96(%rsp), %rbp, %r13 ## 8-byte Folded Reload
+ addq %rbx, %rbp
+ mulxq -16(%rsp), %r11, %rbx ## 8-byte Folded Reload
+ adcq %rax, %r11
+ mulxq -24(%rsp), %r14, %rax ## 8-byte Folded Reload
+ adcq %rcx, %r14
+ mulxq -32(%rsp), %r10, %rcx ## 8-byte Folded Reload
+ adcq %rsi, %r10
+ mulxq -40(%rsp), %r15, %rsi ## 8-byte Folded Reload
+ adcq %rdi, %r15
+ mulxq -48(%rsp), %r12, %rdx ## 8-byte Folded Reload
+ adcq %r8, %r12
+ adcq $0, %r9
+ addq %r13, %r11
+ adcq %rbx, %r14
+ adcq %rax, %r10
+ adcq %rcx, %r15
+ adcq %rsi, %r12
+ adcq %rdx, %r9
+ movq -120(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ mulxq -128(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r13, %rdi ## 8-byte Folded Reload
+ addq %rcx, %rdi
+ mulxq -56(%rsp), %rbx, %rcx ## 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -64(%rsp), %rsi, %rbp ## 8-byte Folded Reload
+ adcq %rcx, %rsi
+ mulxq -72(%rsp), %rax, %rcx ## 8-byte Folded Reload
+ movq %rcx, -88(%rsp) ## 8-byte Spill
+ adcq %rbp, %rax
+ mulxq -80(%rsp), %r8, %rcx ## 8-byte Folded Reload
+ adcq -88(%rsp), %r8 ## 8-byte Folded Reload
+ adcq $0, %rcx
+ addq %r11, %r13
+ adcq %r14, %rdi
+ adcq %r10, %rbx
+ adcq %r15, %rsi
+ adcq %r12, %rax
+ adcq %r9, %r8
+ adcq $0, %rcx
+ movq %r13, %rdx
+ imulq -104(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -96(%rsp), %rbp, %r12 ## 8-byte Folded Reload
+ addq %r13, %rbp
+ mulxq -16(%rsp), %r11, %rbp ## 8-byte Folded Reload
+ adcq %rdi, %r11
+ mulxq -24(%rsp), %r9, %rdi ## 8-byte Folded Reload
+ adcq %rbx, %r9
+ mulxq -32(%rsp), %r10, %rbx ## 8-byte Folded Reload
+ adcq %rsi, %r10
+ mulxq -40(%rsp), %r14, %rsi ## 8-byte Folded Reload
+ adcq %rax, %r14
+ mulxq -48(%rsp), %r15, %rax ## 8-byte Folded Reload
+ adcq %r8, %r15
+ adcq $0, %rcx
+ addq %r12, %r11
+ adcq %rbp, %r9
+ adcq %rdi, %r10
+ adcq %rbx, %r14
+ adcq %rsi, %r15
+ adcq %rax, %rcx
+ movq -120(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ mulxq -128(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r13, %rbx ## 8-byte Folded Reload
+ addq %rsi, %rbx
+ mulxq -56(%rsp), %rdi, %rbp ## 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq -64(%rsp), %rsi, %r8 ## 8-byte Folded Reload
+ adcq %rbp, %rsi
+ mulxq -72(%rsp), %rax, %rbp ## 8-byte Folded Reload
+ adcq %r8, %rax
+ mulxq -80(%rsp), %r8, %r12 ## 8-byte Folded Reload
+ adcq %rbp, %r8
+ adcq $0, %r12
+ addq %r11, %r13
+ adcq %r9, %rbx
+ adcq %r10, %rdi
+ adcq %r14, %rsi
+ adcq %r15, %rax
+ adcq %rcx, %r8
+ adcq $0, %r12
+ movq %r13, %rdx
+ imulq -104(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -96(%rsp), %rbp, %rcx ## 8-byte Folded Reload
+ addq %r13, %rbp
+ mulxq -16(%rsp), %r11, %rbp ## 8-byte Folded Reload
+ adcq %rbx, %r11
+ mulxq -24(%rsp), %r9, %rbx ## 8-byte Folded Reload
+ adcq %rdi, %r9
+ mulxq -32(%rsp), %r10, %rdi ## 8-byte Folded Reload
+ adcq %rsi, %r10
+ mulxq -40(%rsp), %r14, %rsi ## 8-byte Folded Reload
+ adcq %rax, %r14
+ mulxq -48(%rsp), %r15, %rax ## 8-byte Folded Reload
+ adcq %r8, %r15
+ adcq $0, %r12
+ addq %rcx, %r11
+ adcq %rbp, %r9
+ adcq %rbx, %r10
+ adcq %rdi, %r14
+ adcq %rsi, %r15
+ adcq %rax, %r12
+ movq -120(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ mulxq -128(%rsp), %rsi, %rcx ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r13, %rax ## 8-byte Folded Reload
+ addq %rsi, %rax
+ mulxq -56(%rsp), %rbx, %rsi ## 8-byte Folded Reload
+ adcq %rcx, %rbx
+ mulxq -64(%rsp), %rdi, %rcx ## 8-byte Folded Reload
+ adcq %rsi, %rdi
+ mulxq -72(%rsp), %rsi, %rbp ## 8-byte Folded Reload
+ adcq %rcx, %rsi
+ mulxq -80(%rsp), %r8, %rcx ## 8-byte Folded Reload
+ adcq %rbp, %r8
+ adcq $0, %rcx
+ addq %r11, %r13
+ adcq %r9, %rax
+ adcq %r10, %rbx
+ adcq %r14, %rdi
+ adcq %r15, %rsi
+ adcq %r12, %r8
+ adcq $0, %rcx
+ movq %r13, %rdx
+ imulq -104(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -96(%rsp), %rbp, %r9 ## 8-byte Folded Reload
+ addq %r13, %rbp
+ mulxq -16(%rsp), %r13, %rbp ## 8-byte Folded Reload
+ adcq %rax, %r13
+ mulxq -24(%rsp), %r11, %rax ## 8-byte Folded Reload
+ adcq %rbx, %r11
+ mulxq -32(%rsp), %r10, %rbx ## 8-byte Folded Reload
+ adcq %rdi, %r10
+ mulxq -40(%rsp), %r14, %rdi ## 8-byte Folded Reload
+ adcq %rsi, %r14
+ mulxq -48(%rsp), %rsi, %rdx ## 8-byte Folded Reload
+ adcq %r8, %rsi
+ adcq $0, %rcx
+ addq %r9, %r13
+ adcq %rbp, %r11
+ adcq %rax, %r10
+ adcq %rbx, %r14
+ adcq %rdi, %rsi
+ adcq %rdx, %rcx
+ movq -120(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ mulxq -128(%rsp), %rdi, %rax ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r8, %rbx ## 8-byte Folded Reload
+ addq %rdi, %rbx
+ mulxq -56(%rsp), %rdi, %rbp ## 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq -64(%rsp), %r15, %rax ## 8-byte Folded Reload
+ adcq %rbp, %r15
+ mulxq -72(%rsp), %r12, %rbp ## 8-byte Folded Reload
+ adcq %rax, %r12
+ mulxq -80(%rsp), %r9, %rax ## 8-byte Folded Reload
+ adcq %rbp, %r9
+ adcq $0, %rax
+ addq %r13, %r8
+ adcq %r11, %rbx
+ adcq %r10, %rdi
+ adcq %r14, %r15
+ adcq %rsi, %r12
+ adcq %rcx, %r9
+ adcq $0, %rax
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ imulq %r8, %rdx
+ mulxq -96(%rsp), %rcx, %rsi ## 8-byte Folded Reload
+ movq %rsi, -104(%rsp) ## 8-byte Spill
+ addq %r8, %rcx
+ movq -16(%rsp), %r11 ## 8-byte Reload
+ mulxq %r11, %r8, %rcx
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ adcq %rbx, %r8
+ movq -24(%rsp), %r10 ## 8-byte Reload
+ mulxq %r10, %rsi, %rcx
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ adcq %rdi, %rsi
+ movq -32(%rsp), %r13 ## 8-byte Reload
+ mulxq %r13, %rdi, %rcx
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ adcq %r15, %rdi
+ movq -40(%rsp), %rcx ## 8-byte Reload
+ mulxq %rcx, %r15, %rbx
+ adcq %r12, %r15
+ movq -48(%rsp), %r14 ## 8-byte Reload
+ mulxq %r14, %r12, %rbp
+ adcq %r9, %r12
+ adcq $0, %rax
+ addq -104(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -120(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -128(%rsp), %r15 ## 8-byte Folded Reload
+ adcq %rbx, %r12
+ adcq %rbp, %rax
+ movq %r8, %rbp
+ subq -96(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rsi, %rbx
+ sbbq %r11, %rbx
+ movq %rdi, %r11
+ sbbq %r10, %r11
+ movq %r15, %r10
+ sbbq %r13, %r10
+ movq %r12, %r9
+ sbbq %rcx, %r9
+ movq %rax, %rcx
+ sbbq %r14, %rcx
+ movq %rcx, %rdx
+ sarq $63, %rdx
+ cmovsq %r8, %rbp
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ movq %rbp, (%rdx)
+ cmovsq %rsi, %rbx
+ movq %rbx, 8(%rdx)
+ cmovsq %rdi, %r11
+ movq %r11, 16(%rdx)
+ cmovsq %r15, %r10
+ movq %r10, 24(%rdx)
+ cmovsq %r12, %r9
+ movq %r9, 32(%rdx)
+ cmovsq %rax, %rcx
+ movq %rcx, 40(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
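+## mcl_fp_montRed6Lbmi2: Montgomery reduction of a 12-limb value at (%rsi) to 6 limbs modulo the
+## modulus at (%rdx); ends with a conditional subtraction of the modulus.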
+ .globl _mcl_fp_montRed6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montRed6Lbmi2: ## @mcl_fp_montRed6Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ pushq %rax
+ movq %rdx, %rcx
+ movq %rdi, (%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq (%rsi), %r9
+ movq %r9, %rdx
+ imulq %rax, %rdx
+ movq 40(%rcx), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulxq %rax, %r12, %r13
+ movq 32(%rcx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ mulxq %rax, %r10, %r8
+ movq 24(%rcx), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ mulxq %rax, %r14, %r15
+ movq 16(%rcx), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulxq %rax, %rbp, %r11
+ movq (%rcx), %rdi
+ movq %rdi, -40(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulxq %rax, %rax, %rbx
+ mulxq %rdi, %rdx, %rcx
+ addq %rax, %rcx
+ adcq %rbp, %rbx
+ adcq %r14, %r11
+ adcq %r10, %r15
+ adcq %r12, %r8
+ adcq $0, %r13
+ addq %r9, %rdx
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %rbx
+ adcq 24(%rsi), %r11
+ adcq 32(%rsi), %r15
+ adcq 40(%rsi), %r8
+ movq %r8, -112(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %r13
+ movq %r13, -104(%rsp) ## 8-byte Spill
+ movq 88(%rsi), %r8
+ movq 80(%rsi), %rdx
+ movq 72(%rsi), %rdi
+ movq 64(%rsi), %rax
+ movq 56(%rsi), %r14
+ adcq $0, %r14
+ adcq $0, %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -96(%rsp) ## 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, -24(%rsp) ## 8-byte Spill
+ sbbq %r12, %r12
+ andl $1, %r12d
+ movq %rcx, %rdx
+ imulq -8(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -72(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ movq %rsi, -120(%rsp) ## 8-byte Spill
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ mulxq -16(%rsp), %rax, %r13 ## 8-byte Folded Reload
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulxq -48(%rsp), %rbp, %r10 ## 8-byte Folded Reload
+ mulxq -32(%rsp), %r9, %r8 ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rsi, %rdi ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rdx, %rax ## 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %r9, %rdi
+ adcq %rbp, %r8
+ adcq -56(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r13 ## 8-byte Folded Reload
+ movq -128(%rsp), %rsi ## 8-byte Reload
+ adcq $0, %rsi
+ addq %rcx, %rdx
+ adcq %rbx, %rax
+ adcq %r11, %rdi
+ adcq %r15, %r8
+ adcq -112(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ adcq %r14, %rsi
+ movq %rsi, -128(%rsp) ## 8-byte Spill
+ adcq $0, -88(%rsp) ## 8-byte Folded Spill
+ adcq $0, -96(%rsp) ## 8-byte Folded Spill
+ adcq $0, -80(%rsp) ## 8-byte Folded Spill
+ adcq $0, -24(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rdx
+ imulq -8(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -72(%rsp), %rsi, %rcx ## 8-byte Folded Reload
+ movq %rsi, -112(%rsp) ## 8-byte Spill
+ movq %rcx, -104(%rsp) ## 8-byte Spill
+ movq -16(%rsp), %rbx ## 8-byte Reload
+ mulxq %rbx, %rcx, %r14
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ mulxq -48(%rsp), %rcx, %r15 ## 8-byte Folded Reload
+ movq %rcx, -56(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %r11, %rbp ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rsi, %r9 ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rdx, %rcx ## 8-byte Folded Reload
+ addq %rsi, %rcx
+ adcq %r11, %r9
+ adcq -56(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -120(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ movq -104(%rsp), %rsi ## 8-byte Reload
+ adcq $0, %rsi
+ addq %rax, %rdx
+ adcq %rdi, %rcx
+ adcq %r8, %r9
+ adcq %r10, %rbp
+ adcq %r13, %r15
+ adcq -128(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -88(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -104(%rsp) ## 8-byte Spill
+ adcq $0, -96(%rsp) ## 8-byte Folded Spill
+ adcq $0, -80(%rsp) ## 8-byte Folded Spill
+ adcq $0, -24(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rcx, %rdx
+ movq -8(%rsp), %r13 ## 8-byte Reload
+ imulq %r13, %rdx
+ mulxq -72(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ movq %rsi, -112(%rsp) ## 8-byte Spill
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ mulxq %rbx, %rsi, %rax
+ movq %rsi, -120(%rsp) ## 8-byte Spill
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq -48(%rsp), %r11 ## 8-byte Reload
+ mulxq %r11, %rax, %rbx
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %r10, %r8 ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rsi, %rdi ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rdx, %rax ## 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %r10, %rdi
+ adcq -56(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rbx ## 8-byte Folded Reload
+ movq -88(%rsp), %r10 ## 8-byte Reload
+ adcq -112(%rsp), %r10 ## 8-byte Folded Reload
+ movq -128(%rsp), %rsi ## 8-byte Reload
+ adcq $0, %rsi
+ addq %rcx, %rdx
+ adcq %r9, %rax
+ adcq %rbp, %rdi
+ adcq %r15, %r8
+ adcq %r14, %rbx
+ adcq -104(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, -88(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -128(%rsp) ## 8-byte Spill
+ adcq $0, -80(%rsp) ## 8-byte Folded Spill
+ adcq $0, -24(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rdx
+ imulq %r13, %rdx
+ mulxq -72(%rsp), %rsi, %rcx ## 8-byte Folded Reload
+ movq %rsi, -104(%rsp) ## 8-byte Spill
+ movq %rcx, -96(%rsp) ## 8-byte Spill
+ mulxq -16(%rsp), %rsi, %rcx ## 8-byte Folded Reload
+ movq %rsi, -112(%rsp) ## 8-byte Spill
+ mulxq %r11, %rsi, %r13
+ movq %rsi, -120(%rsp) ## 8-byte Spill
+ movq -32(%rsp), %r10 ## 8-byte Reload
+ mulxq %r10, %r15, %r14
+ mulxq -64(%rsp), %rsi, %r9 ## 8-byte Folded Reload
+ movq -40(%rsp), %r11 ## 8-byte Reload
+ mulxq %r11, %rdx, %rbp
+ addq %rsi, %rbp
+ adcq %r15, %r9
+ adcq -120(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rcx ## 8-byte Folded Reload
+ movq -96(%rsp), %rsi ## 8-byte Reload
+ adcq $0, %rsi
+ addq %rax, %rdx
+ adcq %rdi, %rbp
+ adcq %r8, %r9
+ adcq %rbx, %r14
+ adcq -88(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ adcq -80(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -96(%rsp) ## 8-byte Spill
+ adcq $0, -24(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ imulq %rbp, %rdx
+ mulxq -72(%rsp), %rax, %rsi ## 8-byte Folded Reload
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ mulxq %r10, %rax, %r15
+ mulxq %r11, %r10, %rdi
+ mulxq -64(%rsp), %rbx, %r8 ## 8-byte Folded Reload
+ addq %rdi, %rbx
+ adcq %rax, %r8
+ mulxq -48(%rsp), %rax, %rdi ## 8-byte Folded Reload
+ adcq %r15, %rax
+ movq -16(%rsp), %r15 ## 8-byte Reload
+ mulxq %r15, %rdx, %r11
+ adcq %rdi, %rdx
+ adcq -80(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %rsi
+ addq %rbp, %r10
+ adcq %r9, %rbx
+ adcq %r14, %r8
+ adcq %r13, %rax
+ adcq -128(%rsp), %rdx ## 8-byte Folded Reload
+ adcq -96(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -24(%rsp), %rsi ## 8-byte Folded Reload
+ adcq $0, %r12
+ movq %rbx, %rcx
+ subq -40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %r8, %rdi
+ sbbq -64(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rax, %rbp
+ sbbq -32(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rdx, %r9
+ sbbq -48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r11, %r10
+ sbbq %r15, %r10
+ movq %rsi, %r15
+ sbbq -72(%rsp), %r15 ## 8-byte Folded Reload
+ sbbq $0, %r12
+ andl $1, %r12d
+ cmovneq %rsi, %r15
+ testb %r12b, %r12b
+ cmovneq %rbx, %rcx
+ movq (%rsp), %rsi ## 8-byte Reload
+ movq %rcx, (%rsi)
+ cmovneq %r8, %rdi
+ movq %rdi, 8(%rsi)
+ cmovneq %rax, %rbp
+ movq %rbp, 16(%rsi)
+ cmovneq %rdx, %r9
+ movq %r9, 24(%rsi)
+ cmovneq %r11, %r10
+ movq %r10, 32(%rsi)
+ movq %r15, 40(%rsi)
+ addq $8, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addPre6Lbmi2: ## @mcl_fp_addPre6Lbmi2
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 40(%rsi), %r11
+ movq 32(%rdx), %r9
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %r14
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r10, %rax
+ movq %rax, 24(%rdi)
+ adcq %r9, %r14
+ movq %r14, 32(%rdi)
+ adcq %r8, %r11
+ movq %r11, 40(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fp_subPre6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subPre6Lbmi2: ## @mcl_fp_subPre6Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 40(%rsi), %r9
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rsi
+ movq 24(%rdx), %r14
+ movq 32(%rdx), %r15
+ sbbq 16(%rdx), %rcx
+ movq %rbx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r14, %r11
+ movq %r11, 24(%rdi)
+ sbbq %r15, %r10
+ movq %r10, 32(%rdi)
+ sbbq %r8, %r9
+ movq %r9, 40(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_shr1_6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_shr1_6Lbmi2: ## @mcl_fp_shr1_6Lbmi2
+## BB#0:
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %rdx
+ movq 16(%rsi), %rax
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rcx
+ movq %rcx, (%rdi)
+ shrdq $1, %rax, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rdx, %rax
+ movq %rax, 16(%rdi)
+ shrdq $1, %r9, %rdx
+ movq %rdx, 24(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 32(%rdi)
+ shrq %r8
+ movq %r8, 40(%rdi)
+ retq
+
+ .globl _mcl_fp_add6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_add6Lbmi2: ## @mcl_fp_add6Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r14
+ movq 40(%rsi), %r8
+ movq 32(%rdx), %r15
+ movq 24(%rdx), %rbx
+ movq 24(%rsi), %r10
+ movq 32(%rsi), %r9
+ movq 16(%rdx), %r11
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r11
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r11, 16(%rdi)
+ adcq %rbx, %r10
+ movq %r10, 24(%rdi)
+ adcq %r15, %r9
+ movq %r9, 32(%rdi)
+ adcq %r14, %r8
+ movq %r8, 40(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r11
+ sbbq 24(%rcx), %r10
+ sbbq 32(%rcx), %r9
+ sbbq 40(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB89_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r11, 16(%rdi)
+ movq %r10, 24(%rdi)
+ movq %r9, 32(%rdi)
+ movq %r8, 40(%rdi)
+LBB89_2: ## %carry
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_addNF6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addNF6Lbmi2: ## @mcl_fp_addNF6Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 32(%rdx), %r9
+ movq 24(%rdx), %r10
+ movq 16(%rdx), %r11
+ movq (%rdx), %r15
+ movq 8(%rdx), %r14
+ addq (%rsi), %r15
+ adcq 8(%rsi), %r14
+ adcq 16(%rsi), %r11
+ adcq 24(%rsi), %r10
+ adcq 32(%rsi), %r9
+ adcq 40(%rsi), %r8
+ movq %r15, %rsi
+ subq (%rcx), %rsi
+ movq %r14, %rbx
+ sbbq 8(%rcx), %rbx
+ movq %r11, %rdx
+ sbbq 16(%rcx), %rdx
+ movq %r10, %r13
+ sbbq 24(%rcx), %r13
+ movq %r9, %r12
+ sbbq 32(%rcx), %r12
+ movq %r8, %rax
+ sbbq 40(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r15, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r14, %rbx
+ movq %rbx, 8(%rdi)
+ cmovsq %r11, %rdx
+ movq %rdx, 16(%rdi)
+ cmovsq %r10, %r13
+ movq %r13, 24(%rdi)
+ cmovsq %r9, %r12
+ movq %r12, 32(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
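+## 6-limb modular subtraction, z(%rdi) = x(%rsi) - y(%rdx) mod p(%rcx): the raw
+## difference is stored, and the carry path adds p back when the subtraction borrowed.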
+ .globl _mcl_fp_sub6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sub6Lbmi2: ## @mcl_fp_sub6Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r14
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r10
+ movq 16(%rsi), %r11
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %rsi
+ movq 24(%rdx), %r15
+ movq 32(%rdx), %r12
+ sbbq 16(%rdx), %r11
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r11, 16(%rdi)
+ sbbq %r15, %r10
+ movq %r10, 24(%rdi)
+ sbbq %r12, %r9
+ movq %r9, 32(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 40(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB91_2
+## BB#1: ## %carry
+ movq 40(%rcx), %r14
+ movq 32(%rcx), %r15
+ movq 24(%rcx), %r12
+ movq 8(%rcx), %rbx
+ movq 16(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %rsi, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r11, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r10, %r12
+ movq %r12, 24(%rdi)
+ adcq %r9, %r15
+ movq %r15, 32(%rdi)
+ adcq %r8, %r14
+ movq %r14, 40(%rdi)
+LBB91_2: ## %nocarry
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
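+## Branch-free "NF" 6-limb modular subtraction: the difference x - y is computed from
+## SSE-loaded limbs, its sign is smeared into an all-ones/all-zeros mask, and p(%rcx)
+## ANDed with that mask is added so the modulus is folded in only on a negative result.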
+ .globl _mcl_fp_subNF6Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subNF6Lbmi2: ## @mcl_fp_subNF6Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ movdqu 32(%rdx), %xmm2
+ pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1]
+ movd %xmm3, %r10
+ movdqu (%rsi), %xmm3
+ movdqu 16(%rsi), %xmm4
+ movdqu 32(%rsi), %xmm5
+ pshufd $78, %xmm5, %xmm6 ## xmm6 = xmm5[2,3,0,1]
+ movd %xmm6, %rax
+ movd %xmm2, %r11
+ movd %xmm5, %r8
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r14
+ pshufd $78, %xmm4, %xmm2 ## xmm2 = xmm4[2,3,0,1]
+ movd %xmm2, %r9
+ movd %xmm1, %r15
+ movd %xmm4, %r12
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %r13
+ pshufd $78, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,0,1]
+ movd %xmm1, %rbp
+ movd %xmm0, %rdx
+ movd %xmm3, %rbx
+ subq %rdx, %rbx
+ sbbq %r13, %rbp
+ sbbq %r15, %r12
+ sbbq %r14, %r9
+ sbbq %r11, %r8
+ sbbq %r10, %rax
+ movq %rax, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rsi
+ shldq $1, %rax, %rsi
+ andq (%rcx), %rsi
+ movq 40(%rcx), %r10
+ andq %rdx, %r10
+ movq 32(%rcx), %r11
+ andq %rdx, %r11
+ movq 24(%rcx), %r14
+ andq %rdx, %r14
+ rorxq $63, %rdx, %r15
+ andq 16(%rcx), %rdx
+ andq 8(%rcx), %r15
+ addq %rbx, %rsi
+ movq %rsi, (%rdi)
+ adcq %rbp, %r15
+ movq %r15, 8(%rdi)
+ adcq %r12, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r9, %r14
+ movq %r14, 24(%rdi)
+ adcq %r8, %r11
+ movq %r11, 32(%rdi)
+ adcq %rax, %r10
+ movq %r10, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
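+## Double-width (12-limb) addition for the 6-limb field: the low six limbs of x + y
+## are stored as-is, while the high half is conditionally reduced by p(%rcx) via
+## cmovneq before being written to z[6..11].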
+ .globl _mcl_fpDbl_add6Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_add6Lbmi2: ## @mcl_fpDbl_add6Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 88(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 80(%rdx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq 72(%rdx), %r14
+ movq 64(%rdx), %r15
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %r13
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %rbp
+ adcq 32(%rdx), %r13
+ movq 56(%rdx), %r11
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rdx
+ movq %rbx, (%rdi)
+ movq 88(%rsi), %r8
+ movq %rax, 8(%rdi)
+ movq 80(%rsi), %r10
+ movq %r12, 16(%rdi)
+ movq 72(%rsi), %r12
+ movq %rbp, 24(%rdi)
+ movq 40(%rsi), %rax
+ adcq %rdx, %rax
+ movq 64(%rsi), %rdx
+ movq %r13, 32(%rdi)
+ movq 56(%rsi), %r13
+ movq 48(%rsi), %rbp
+ adcq %r9, %rbp
+ movq %rax, 40(%rdi)
+ adcq %r11, %r13
+ adcq %r15, %rdx
+ adcq %r14, %r12
+ adcq -16(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -8(%rsp), %r8 ## 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rbp, %rsi
+ subq (%rcx), %rsi
+ movq %r13, %rbx
+ sbbq 8(%rcx), %rbx
+ movq %rdx, %r9
+ sbbq 16(%rcx), %r9
+ movq %r12, %r11
+ sbbq 24(%rcx), %r11
+ movq %r10, %r14
+ sbbq 32(%rcx), %r14
+ movq %r8, %r15
+ sbbq 40(%rcx), %r15
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rbp, %rsi
+ movq %rsi, 48(%rdi)
+ testb %al, %al
+ cmovneq %r13, %rbx
+ movq %rbx, 56(%rdi)
+ cmovneq %rdx, %r9
+ movq %r9, 64(%rdi)
+ cmovneq %r12, %r11
+ movq %r11, 72(%rdi)
+ cmovneq %r10, %r14
+ movq %r14, 80(%rdi)
+ cmovneq %r8, %r15
+ movq %r15, 88(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
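+## Double-width (12-limb) subtraction for the 6-limb field: the low six limbs of x - y
+## are stored as-is, and p(%rcx), masked to zero unless the subtraction borrowed, is
+## added to the high half before it is written to z[6..11].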
+ .globl _mcl_fpDbl_sub6Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sub6Lbmi2: ## @mcl_fpDbl_sub6Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 88(%rdx), %r9
+ movq 80(%rdx), %r10
+ movq 72(%rdx), %r14
+ movq 16(%rsi), %r8
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %eax, %eax
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r8
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 64(%rdx), %r13
+ movq %r15, (%rdi)
+ movq 56(%rdx), %rbp
+ movq %r11, 8(%rdi)
+ movq 48(%rdx), %r15
+ movq 40(%rdx), %rdx
+ movq %r8, 16(%rdi)
+ movq 88(%rsi), %r8
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %rdx, %rbx
+ movq 80(%rsi), %r11
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %rdx
+ sbbq %r15, %rdx
+ movq 72(%rsi), %r15
+ movq %rbx, 40(%rdi)
+ movq 64(%rsi), %r12
+ movq 56(%rsi), %rsi
+ sbbq %rbp, %rsi
+ sbbq %r13, %r12
+ sbbq %r14, %r15
+ sbbq %r10, %r11
+ sbbq %r9, %r8
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%rcx), %r14
+ cmoveq %rax, %r14
+ testb %bpl, %bpl
+ movq 16(%rcx), %r9
+ cmoveq %rax, %r9
+ movq 8(%rcx), %rbp
+ cmoveq %rax, %rbp
+ movq 40(%rcx), %r10
+ cmoveq %rax, %r10
+ movq 32(%rcx), %rbx
+ cmoveq %rax, %rbx
+ cmovneq 24(%rcx), %rax
+ addq %rdx, %r14
+ movq %r14, 48(%rdi)
+ adcq %rsi, %rbp
+ movq %rbp, 56(%rdi)
+ adcq %r12, %r9
+ movq %r9, 64(%rdi)
+ adcq %r15, %rax
+ movq %rax, 72(%rdi)
+ adcq %r11, %rbx
+ movq %rbx, 80(%rdi)
+ adcq %r8, %r10
+ movq %r10, 88(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
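+## 7-limb by single-word multiplication without reduction, z(%rdi)[0..7] = x(%rsi) * y:
+## the 64-bit multiplier y arrives in %rdx and drives the BMI2 mulxq instructions.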
+ .globl _mcl_fp_mulUnitPre7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre7Lbmi2: ## @mcl_fp_mulUnitPre7Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ mulxq 48(%rsi), %r8, %r11
+ mulxq 40(%rsi), %r9, %r13
+ mulxq 32(%rsi), %r10, %rcx
+ mulxq 8(%rsi), %r12, %r14
+ mulxq (%rsi), %r15, %rbx
+ addq %r12, %rbx
+ mulxq 24(%rsi), %r12, %rax
+ mulxq 16(%rsi), %rdx, %rsi
+ movq %r15, (%rdi)
+ movq %rbx, 8(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r12, %rsi
+ movq %rsi, 24(%rdi)
+ adcq %r10, %rax
+ movq %rax, 32(%rdi)
+ adcq %r9, %rcx
+ movq %rcx, 40(%rdi)
+ adcq %r8, %r13
+ movq %r13, 48(%rdi)
+ adcq $0, %r11
+ movq %r11, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
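+## 7x7-limb schoolbook multiplication without reduction, z(%rdi)[0..13] = x(%rsi) * y(%rdx):
+## one mulxq pass per multiplier word, with the partial products of each pass folded
+## into the running carry chain before the next word is processed.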
+ .globl _mcl_fpDbl_mulPre7Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre7Lbmi2: ## @mcl_fpDbl_mulPre7Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r14
+ movq %rsi, %r8
+ movq %rdi, %r13
+ movq %r13, -48(%rsp) ## 8-byte Spill
+ movq (%r8), %rcx
+ movq %rcx, -72(%rsp) ## 8-byte Spill
+ movq 8(%r8), %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq (%r14), %rsi
+ movq %r14, -64(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ mulxq %rsi, %rbp, %rax
+ movq %rcx, %rdx
+ mulxq %rsi, %rdx, %rcx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq 24(%r8), %rdi
+ movq %rdi, -88(%rsp) ## 8-byte Spill
+ movq 16(%r8), %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ addq %rbp, %rcx
+ mulxq %rsi, %rbx, %rbp
+ adcq %rax, %rbx
+ movq %rdi, %rdx
+ mulxq %rsi, %r12, %rax
+ adcq %rbp, %r12
+ movq 32(%r8), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ mulxq %rsi, %r9, %rbp
+ adcq %rax, %r9
+ movq 40(%r8), %rdi
+ movq %rdi, %rdx
+ mulxq %rsi, %r10, %rax
+ adcq %rbp, %r10
+ movq 48(%r8), %r15
+ movq %r15, %rdx
+ mulxq %rsi, %rsi, %r11
+ adcq %rax, %rsi
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq %rax, (%r13)
+ adcq $0, %r11
+ movq 8(%r14), %r13
+ movq -72(%rsp), %rdx ## 8-byte Reload
+ mulxq %r13, %r14, %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ addq %rcx, %r14
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ mulxq %r13, %rcx, %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ adcq %rbx, %rcx
+ movq -96(%rsp), %rdx ## 8-byte Reload
+ mulxq %r13, %rbx, %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ adcq %r12, %rbx
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ mulxq %r13, %rbp, %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ adcq %r9, %rbp
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ mulxq %r13, %rax, %r9
+ adcq %r10, %rax
+ movq %rdi, %rdx
+ mulxq %r13, %r10, %rdi
+ adcq %rsi, %r10
+ movq %r15, %rdx
+ mulxq %r13, %r13, %rdx
+ adcq %r11, %r13
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -72(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -96(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -88(%rsp), %rax ## 8-byte Folded Reload
+ adcq %r9, %r10
+ movq -48(%rsp), %rsi ## 8-byte Reload
+ movq %r14, 8(%rsi)
+ adcq %rdi, %r13
+ adcq %rdx, %r12
+ movq (%r8), %rsi
+ movq %rsi, -88(%rsp) ## 8-byte Spill
+ movq 8(%r8), %r11
+ movq %r11, -104(%rsp) ## 8-byte Spill
+ movq -64(%rsp), %rdx ## 8-byte Reload
+ movq 16(%rdx), %rdi
+ movq %rsi, %rdx
+ mulxq %rdi, %r9, %rdx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ addq %rcx, %r9
+ movq %r11, %rdx
+ mulxq %rdi, %r14, %rcx
+ movq %rcx, -16(%rsp) ## 8-byte Spill
+ adcq %rbx, %r14
+ movq 16(%r8), %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ mulxq %rdi, %rsi, %rcx
+ movq %rcx, -24(%rsp) ## 8-byte Spill
+ adcq %rbp, %rsi
+ movq 24(%r8), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ mulxq %rdi, %rbp, %rcx
+ movq %rcx, -32(%rsp) ## 8-byte Spill
+ adcq %rax, %rbp
+ movq 32(%r8), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ mulxq %rdi, %r11, %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ adcq %r10, %r11
+ movq 40(%r8), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ mulxq %rdi, %r15, %rax
+ adcq %r13, %r15
+ movq 48(%r8), %r13
+ movq %r13, %rdx
+ mulxq %rdi, %rcx, %rdx
+ adcq %r12, %rcx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq -8(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -16(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -24(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -32(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -40(%rsp), %r15 ## 8-byte Folded Reload
+ adcq %rax, %rcx
+ adcq %rdx, %rbx
+ movq -48(%rsp), %rax ## 8-byte Reload
+ movq %r9, 16(%rax)
+ movq -64(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdi
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r9, %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ addq %r14, %r9
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rax, %rdx
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ adcq %rsi, %rax
+ movq -96(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r14, %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ adcq %rbp, %r14
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r10, %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ adcq %r11, %r10
+ movq -72(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rbp, %rsi
+ adcq %r15, %rbp
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r11, %r15
+ adcq %rcx, %r11
+ movq %r13, %rdx
+ mulxq %rdi, %r13, %rcx
+ adcq %rbx, %r13
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -88(%rsp), %rax ## 8-byte Folded Reload
+ adcq -104(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -80(%rsp), %rbp ## 8-byte Folded Reload
+ adcq %rsi, %r11
+ movq -48(%rsp), %rdi ## 8-byte Reload
+ movq %r9, 24(%rdi)
+ adcq %r15, %r13
+ adcq %rcx, %r12
+ movq (%r8), %rdx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq 8(%r8), %rbx
+ movq %rbx, -104(%rsp) ## 8-byte Spill
+ movq -64(%rsp), %rcx ## 8-byte Reload
+ movq 32(%rcx), %rcx
+ mulxq %rcx, %rsi, %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ addq %rax, %rsi
+ movq %rbx, %rdx
+ mulxq %rcx, %r9, %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ adcq %r14, %r9
+ movq 16(%r8), %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ mulxq %rcx, %rax, %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ adcq %r10, %rax
+ movq 24(%r8), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ mulxq %rcx, %r15, %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ adcq %rbp, %r15
+ movq 32(%r8), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ mulxq %rcx, %r10, %rbp
+ adcq %r11, %r10
+ movq 40(%r8), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ mulxq %rcx, %r11, %rbx
+ adcq %r13, %r11
+ movq 48(%r8), %rdx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ mulxq %rcx, %r14, %rcx
+ adcq %r12, %r14
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -16(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -24(%rsp), %rax ## 8-byte Folded Reload
+ adcq -32(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -40(%rsp), %r10 ## 8-byte Folded Reload
+ adcq %rbp, %r11
+ adcq %rbx, %r14
+ adcq %rcx, %r12
+ movq %rsi, 32(%rdi)
+ movq -64(%rsp), %rsi ## 8-byte Reload
+ movq 40(%rsi), %rdi
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r13, %rcx
+ movq %rcx, -88(%rsp) ## 8-byte Spill
+ addq %r9, %r13
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rcx, %rdx
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ adcq %rax, %rcx
+ movq -96(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rax, %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ adcq %r15, %rax
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rbx, %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ adcq %r10, %rbx
+ movq -72(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %rbp, %r15
+ adcq %r11, %rbp
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r9, %r11
+ adcq %r14, %r9
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ mulxq %rdi, %r10, %rdx
+ adcq %r12, %r10
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ addq -88(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -104(%rsp), %rax ## 8-byte Folded Reload
+ adcq -96(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -80(%rsp), %rbp ## 8-byte Folded Reload
+ adcq %r15, %r9
+ movq -48(%rsp), %r14 ## 8-byte Reload
+ movq %r13, 40(%r14)
+ adcq %r11, %r10
+ adcq %rdx, %rdi
+ movq 48(%rsi), %rdx
+ mulxq (%r8), %r11, %rsi
+ movq %rsi, -64(%rsp) ## 8-byte Spill
+ addq %rcx, %r11
+ mulxq 8(%r8), %rsi, %r15
+ adcq %rax, %rsi
+ mulxq 16(%r8), %rcx, %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ adcq %rbx, %rcx
+ mulxq 24(%r8), %rbx, %r12
+ adcq %rbp, %rbx
+ mulxq 32(%r8), %rbp, %r13
+ adcq %r9, %rbp
+ mulxq 40(%r8), %rax, %r9
+ adcq %r10, %rax
+ mulxq 48(%r8), %rdx, %r8
+ adcq %rdi, %rdx
+ sbbq %r10, %r10
+ andl $1, %r10d
+ addq -64(%rsp), %rsi ## 8-byte Folded Reload
+ adcq %r15, %rcx
+ movq %r11, 48(%r14)
+ movq %rsi, 56(%r14)
+ movq %rcx, 64(%r14)
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 72(%r14)
+ adcq %r12, %rbp
+ movq %rbp, 80(%r14)
+ adcq %r13, %rax
+ movq %rax, 88(%r14)
+ adcq %r9, %rdx
+ movq %rdx, 96(%r14)
+ adcq %r8, %r10
+ movq %r10, 104(%r14)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
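+## 7-limb squaring without reduction, z(%rdi)[0..13] = x(%rsi)^2: the same
+## column-by-column passes as the multiplication above, with both mulxq operands read from x.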
+ .globl _mcl_fpDbl_sqrPre7Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre7Lbmi2: ## @mcl_fpDbl_sqrPre7Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -40(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %rdx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rax
+ mulxq %rcx, %r8, %r10
+ movq 24(%rsi), %rbx
+ movq %rbx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ mulxq %rcx, %r12, %rbp
+ movq %rbp, -48(%rsp) ## 8-byte Spill
+ movq %rcx, %rdx
+ mulxq %rcx, %rdx, %rdi
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ addq %r12, %rdi
+ adcq %rbp, %r8
+ movq %rbx, %rdx
+ mulxq %rcx, %rbp, %r9
+ adcq %r10, %rbp
+ movq 32(%rsi), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ mulxq %rcx, %r11, %r14
+ adcq %r9, %r11
+ movq 40(%rsi), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ mulxq %rcx, %r10, %r15
+ adcq %r14, %r10
+ movq 48(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rcx, %rcx, %r13
+ adcq %r15, %rcx
+ movq -40(%rsp), %rdx ## 8-byte Reload
+ movq -80(%rsp), %rbx ## 8-byte Reload
+ movq %rbx, (%rdx)
+ adcq $0, %r13
+ addq %r12, %rdi
+ movq %rax, %rdx
+ mulxq %rax, %r12, %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ adcq %r8, %r12
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r8, %rdx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ adcq %rbp, %r8
+ movq -96(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r9, %rbp
+ adcq %r11, %r9
+ movq -72(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r15, %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ adcq %r10, %r15
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r11, %rbx
+ adcq %rcx, %r11
+ movq %r14, %rdx
+ mulxq %rax, %r14, %rax
+ adcq %r13, %r14
+ sbbq %r13, %r13
+ andl $1, %r13d
+ addq -48(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -80(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r9 ## 8-byte Folded Reload
+ adcq %rbp, %r15
+ movq -40(%rsp), %rcx ## 8-byte Reload
+ movq %rdi, 8(%rcx)
+ adcq -96(%rsp), %r11 ## 8-byte Folded Reload
+ adcq %rbx, %r14
+ adcq %rax, %r13
+ movq (%rsi), %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rcx
+ movq %rcx, -88(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %rbx
+ mulxq %rbx, %rax, %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ addq %r12, %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %rcx, %rdx
+ mulxq %rbx, %r10, %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ adcq %r8, %r10
+ movq %rbx, %rdx
+ mulxq %rbx, %r12, %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ adcq %r9, %r12
+ movq 24(%rsi), %rax
+ movq %rax, %rdx
+ mulxq %rbx, %r8, %rdi
+ movq %rdi, -56(%rsp) ## 8-byte Spill
+ adcq %r8, %r15
+ movq 32(%rsi), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ mulxq %rbx, %rcx, %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ adcq %r11, %rcx
+ movq 40(%rsi), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ mulxq %rbx, %rbp, %r11
+ adcq %r14, %rbp
+ movq 48(%rsi), %r14
+ movq %r14, %rdx
+ mulxq %rbx, %r9, %rdx
+ adcq %r13, %r9
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq -64(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -16(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -24(%rsp), %r15 ## 8-byte Folded Reload
+ adcq %rdi, %rcx
+ adcq -32(%rsp), %rbp ## 8-byte Folded Reload
+ adcq %r11, %r9
+ adcq %rdx, %rbx
+ movq -96(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %rdi, %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ addq %r10, %rdi
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r11, %rdx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ adcq %r12, %r11
+ adcq %r8, %r15
+ movq %rax, %rdx
+ mulxq %rax, %r8, %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ adcq %rcx, %r8
+ movq -72(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r13, %rcx
+ movq %rcx, -72(%rsp) ## 8-byte Spill
+ adcq %rbp, %r13
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r12, %rbp
+ adcq %r9, %r12
+ movq %r14, %rdx
+ mulxq %rax, %rcx, %rax
+ adcq %rbx, %rcx
+ sbbq %r10, %r10
+ andl $1, %r10d
+ addq -96(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -56(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -64(%rsp), %r13 ## 8-byte Folded Reload
+ movq -40(%rsp), %rdx ## 8-byte Reload
+ movq -48(%rsp), %rbx ## 8-byte Reload
+ movq %rbx, 16(%rdx)
+ movq %rdi, 24(%rdx)
+ adcq -72(%rsp), %r12 ## 8-byte Folded Reload
+ adcq %rbp, %rcx
+ adcq %rax, %r10
+ movq (%rsi), %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rdi
+ movq %rdi, -88(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %rbx
+ mulxq %rbx, %rax, %rdx
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ addq %r11, %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %rdi, %rdx
+ mulxq %rbx, %r9, %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ adcq %r15, %r9
+ movq 16(%rsi), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ mulxq %rbx, %r15, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ adcq %r8, %r15
+ movq 24(%rsi), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ mulxq %rbx, %r8, %rbp
+ adcq %r13, %r8
+ movq %rbx, %rdx
+ mulxq %rbx, %r13, %r14
+ adcq %r12, %r13
+ movq 40(%rsi), %rax
+ movq %rax, %rdx
+ mulxq %rbx, %rdx, %rdi
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rdi, -56(%rsp) ## 8-byte Spill
+ adcq %rdx, %rcx
+ movq 48(%rsi), %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ mulxq %rbx, %r11, %rdx
+ adcq %r10, %r11
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -24(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -32(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -8(%rsp), %r8 ## 8-byte Folded Reload
+ adcq %rbp, %r13
+ adcq %r14, %rcx
+ adcq %rdi, %r11
+ adcq %rdx, %r12
+ movq -96(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r14, %rdi
+ addq %r9, %r14
+ movq -88(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %rbx, %rdx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ adcq %r15, %rbx
+ movq -72(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %rbp, %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ adcq %r8, %rbp
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %r10, %r15
+ adcq %r13, %r10
+ adcq -16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rax, %rdx
+ mulxq %rax, %r9, %r13
+ adcq %r11, %r9
+ movq -64(%rsp), %rdx ## 8-byte Reload
+ mulxq %rax, %rax, %r11
+ adcq %r12, %rax
+ sbbq %r8, %r8
+ andl $1, %r8d
+ addq %rdi, %rbx
+ adcq -88(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -96(%rsp), %r10 ## 8-byte Folded Reload
+ adcq %r15, %rcx
+ movq -40(%rsp), %rdi ## 8-byte Reload
+ movq -48(%rsp), %rdx ## 8-byte Reload
+ movq %rdx, 32(%rdi)
+ movq %r14, 40(%rdi)
+ adcq -56(%rsp), %r9 ## 8-byte Folded Reload
+ adcq %r13, %rax
+ adcq %r11, %r8
+ movq 48(%rsi), %rdx
+ mulxq (%rsi), %r12, %r11
+ addq %rbx, %r12
+ mulxq 8(%rsi), %rbx, %r14
+ adcq %rbp, %rbx
+ mulxq 16(%rsi), %rbp, %r15
+ adcq %r10, %rbp
+ mulxq 24(%rsi), %rdi, %r10
+ adcq %rcx, %rdi
+ mulxq 32(%rsi), %rcx, %r13
+ adcq %r9, %rcx
+ mulxq 40(%rsi), %rsi, %r9
+ adcq %rax, %rsi
+ mulxq %rdx, %rdx, %rax
+ adcq %r8, %rdx
+ sbbq %r8, %r8
+ andl $1, %r8d
+ addq %r11, %rbx
+ adcq %r14, %rbp
+ movq -40(%rsp), %r11 ## 8-byte Reload
+ movq %r12, 48(%r11)
+ movq %rbx, 56(%r11)
+ movq %rbp, 64(%r11)
+ adcq %r15, %rdi
+ movq %rdi, 72(%r11)
+ adcq %r10, %rcx
+ movq %rcx, 80(%r11)
+ adcq %r13, %rsi
+ movq %rsi, 88(%r11)
+ adcq %r9, %rdx
+ movq %rdx, 96(%r11)
+ adcq %rax, %r8
+ movq %r8, 104(%r11)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
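+## 7-limb Montgomery multiplication, z(%rdi) = x(%rsi) * y(%rdx) * R^-1 mod p(%rcx)
+## with R = 2^448: each multiplier word is interleaved with a reduction step that
+## scales by p' = -p^-1 mod 2^64 (loaded from -8(%rcx)), and a final conditional
+## subtraction of p keeps the result below the modulus.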
+ .globl _mcl_fp_mont7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mont7Lbmi2: ## @mcl_fp_mont7Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $56, %rsp
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rdi, 48(%rsp) ## 8-byte Spill
+ movq 48(%rsi), %rdi
+ movq %rdi, -64(%rsp) ## 8-byte Spill
+ movq (%rdx), %rax
+ movq %rdi, %rdx
+ mulxq %rax, %rdx, %r13
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq 40(%rsi), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ mulxq %rax, %rdx, %r8
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ mulxq %rax, %r10, %rdi
+ movq 24(%rsi), %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ mulxq %rax, %r14, %rbp
+ movq 16(%rsi), %rdx
+ movq %rdx, 32(%rsp) ## 8-byte Spill
+ mulxq %rax, %r12, %r15
+ movq (%rsi), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rdx
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ mulxq %rax, %rsi, %r11
+ movq %rbx, %rdx
+ mulxq %rax, %rdx, %r9
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ addq %rsi, %r9
+ adcq %r12, %r11
+ adcq %r14, %r15
+ adcq %r10, %rbp
+ movq %rbp, -112(%rsp) ## 8-byte Spill
+ adcq -48(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -104(%rsp) ## 8-byte Spill
+ adcq -40(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, -128(%rsp) ## 8-byte Spill
+ adcq $0, %r13
+ movq %r13, -120(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ imulq %rax, %rdx
+ movq 32(%rcx), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ mulxq %rax, %rbx, %r13
+ movq 16(%rcx), %rsi
+ movq %rsi, -48(%rsp) ## 8-byte Spill
+ mulxq %rsi, %r14, %rbp
+ movq 8(%rcx), %rsi
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ mulxq %rsi, %rsi, %rax
+ movq (%rcx), %rdi
+ movq %rdi, (%rsp) ## 8-byte Spill
+ mulxq %rdi, %r8, %r12
+ addq %rsi, %r12
+ adcq %r14, %rax
+ movq %rax, %rdi
+ movq 24(%rcx), %rsi
+ movq %rsi, -8(%rsp) ## 8-byte Spill
+ mulxq %rsi, %r10, %r14
+ adcq %rbp, %r10
+ adcq %rbx, %r14
+ movq 40(%rcx), %rsi
+ movq %rsi, -16(%rsp) ## 8-byte Spill
+ mulxq %rsi, %rbp, %rsi
+ adcq %r13, %rbp
+ movq 48(%rcx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ mulxq %rax, %rax, %rbx
+ adcq %rsi, %rax
+ adcq $0, %rbx
+ addq -96(%rsp), %r8 ## 8-byte Folded Reload
+ adcq %r9, %r12
+ adcq %r11, %rdi
+ movq %rdi, -96(%rsp) ## 8-byte Spill
+ adcq %r15, %r10
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ adcq -120(%rsp), %rbx ## 8-byte Folded Reload
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq -56(%rsp), %rcx ## 8-byte Reload
+ movq 8(%rcx), %rdx
+ mulxq -64(%rsp), %rdi, %rcx ## 8-byte Folded Reload
+ movq %rdi, -104(%rsp) ## 8-byte Spill
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ mulxq -72(%rsp), %rdi, %rcx ## 8-byte Folded Reload
+ movq %rdi, -88(%rsp) ## 8-byte Spill
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ mulxq 16(%rsp), %r9, %r8 ## 8-byte Folded Reload
+ mulxq 24(%rsp), %rdi, %r11 ## 8-byte Folded Reload
+ movq %rdi, -112(%rsp) ## 8-byte Spill
+ addq %r9, %r11
+ mulxq 32(%rsp), %rcx, %r9 ## 8-byte Folded Reload
+ adcq %r8, %rcx
+ movq %rcx, %rdi
+ mulxq -32(%rsp), %r13, %rcx ## 8-byte Folded Reload
+ adcq %r9, %r13
+ mulxq -80(%rsp), %r8, %r15 ## 8-byte Folded Reload
+ adcq %rcx, %r8
+ adcq -88(%rsp), %r15 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq -120(%rsp), %rcx ## 8-byte Reload
+ adcq $0, %rcx
+ movq -112(%rsp), %r9 ## 8-byte Reload
+ addq %r12, %r9
+ movq %r9, -112(%rsp) ## 8-byte Spill
+ movq %r11, %r12
+ adcq -96(%rsp), %r12 ## 8-byte Folded Reload
+ adcq %r10, %rdi
+ movq %rdi, -88(%rsp) ## 8-byte Spill
+ adcq %r14, %r13
+ adcq %rbp, %r8
+ adcq %rax, %r15
+ adcq %rbx, %rdx
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq %rsi, %rcx
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %r9, %rdx
+ imulq 40(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -24(%rsp), %r10, %rax ## 8-byte Folded Reload
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq 8(%rsp), %rdi, %rbx ## 8-byte Folded Reload
+ mulxq (%rsp), %r14, %r9 ## 8-byte Folded Reload
+ addq %rdi, %r9
+ mulxq -48(%rsp), %rbp, %r11 ## 8-byte Folded Reload
+ adcq %rbx, %rbp
+ adcq %rcx, %r11
+ mulxq -40(%rsp), %rbx, %rsi ## 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -16(%rsp), %rax, %rcx ## 8-byte Folded Reload
+ adcq %rsi, %rax
+ adcq %r10, %rcx
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq -112(%rsp), %r14 ## 8-byte Folded Reload
+ adcq %r12, %r9
+ adcq -88(%rsp), %rbp ## 8-byte Folded Reload
+ adcq %r13, %r11
+ adcq %r8, %rbx
+ adcq %r15, %rax
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ adcq $0, -96(%rsp) ## 8-byte Folded Spill
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ movq 16(%rdx), %rdx
+ mulxq -64(%rsp), %rdi, %rsi ## 8-byte Folded Reload
+ movq %rdi, -112(%rsp) ## 8-byte Spill
+ movq %rsi, -120(%rsp) ## 8-byte Spill
+ mulxq -72(%rsp), %rdi, %rsi ## 8-byte Folded Reload
+ movq %rdi, -88(%rsp) ## 8-byte Spill
+ movq %rsi, -128(%rsp) ## 8-byte Spill
+ mulxq 32(%rsp), %rdi, %r10 ## 8-byte Folded Reload
+ mulxq 16(%rsp), %rsi, %r13 ## 8-byte Folded Reload
+ mulxq 24(%rsp), %r8, %r15 ## 8-byte Folded Reload
+ addq %rsi, %r15
+ adcq %rdi, %r13
+ mulxq -32(%rsp), %r12, %rsi ## 8-byte Folded Reload
+ adcq %r10, %r12
+ mulxq -80(%rsp), %r10, %r14 ## 8-byte Folded Reload
+ adcq %rsi, %r10
+ adcq -88(%rsp), %r14 ## 8-byte Folded Reload
+ movq -128(%rsp), %rsi ## 8-byte Reload
+ adcq -112(%rsp), %rsi ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r9, %r8
+ movq %r8, -112(%rsp) ## 8-byte Spill
+ adcq %rbp, %r15
+ adcq %r11, %r13
+ adcq %rbx, %r12
+ adcq %rax, %r10
+ adcq %rcx, %r14
+ adcq -104(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -128(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, %rbx
+ movq %r8, %rdx
+ imulq 40(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rcx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq 8(%rsp), %rbp, %rsi ## 8-byte Folded Reload
+ mulxq (%rsp), %r11, %r8 ## 8-byte Folded Reload
+ addq %rbp, %r8
+ mulxq -48(%rsp), %rbp, %r9 ## 8-byte Folded Reload
+ adcq %rsi, %rbp
+ adcq %rcx, %r9
+ mulxq -40(%rsp), %rsi, %rdi ## 8-byte Folded Reload
+ adcq %rax, %rsi
+ mulxq -16(%rsp), %rax, %rcx ## 8-byte Folded Reload
+ adcq %rdi, %rax
+ adcq -96(%rsp), %rcx ## 8-byte Folded Reload
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq -112(%rsp), %r11 ## 8-byte Folded Reload
+ adcq %r15, %r8
+ adcq %r13, %rbp
+ adcq %r12, %r9
+ adcq %r10, %rsi
+ adcq %r14, %rax
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ adcq $0, %rbx
+ movq %rbx, -128(%rsp) ## 8-byte Spill
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ movq 24(%rdx), %rdx
+ mulxq -64(%rsp), %rbx, %rdi ## 8-byte Folded Reload
+ movq %rbx, -96(%rsp) ## 8-byte Spill
+ movq %rdi, -120(%rsp) ## 8-byte Spill
+ mulxq -72(%rsp), %rdi, %r13 ## 8-byte Folded Reload
+ movq %rdi, -88(%rsp) ## 8-byte Spill
+ mulxq 32(%rsp), %r10, %r11 ## 8-byte Folded Reload
+ mulxq 16(%rsp), %rdi, %r15 ## 8-byte Folded Reload
+ mulxq 24(%rsp), %rbx, %r12 ## 8-byte Folded Reload
+ movq %rbx, -112(%rsp) ## 8-byte Spill
+ addq %rdi, %r12
+ adcq %r10, %r15
+ mulxq -32(%rsp), %rbx, %rdi ## 8-byte Folded Reload
+ adcq %r11, %rbx
+ mulxq -80(%rsp), %r10, %r14 ## 8-byte Folded Reload
+ adcq %rdi, %r10
+ adcq -88(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r13 ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ movq -112(%rsp), %rdi ## 8-byte Reload
+ addq %r8, %rdi
+ movq %rdi, -112(%rsp) ## 8-byte Spill
+ adcq %rbp, %r12
+ adcq %r9, %r15
+ adcq %rsi, %rbx
+ adcq %rax, %r10
+ adcq %rcx, %r14
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -128(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdi, %rdx
+ imulq 40(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rcx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq 8(%rsp), %rbp, %rsi ## 8-byte Folded Reload
+ mulxq (%rsp), %r11, %r8 ## 8-byte Folded Reload
+ addq %rbp, %r8
+ mulxq -48(%rsp), %rbp, %r9 ## 8-byte Folded Reload
+ adcq %rsi, %rbp
+ adcq %rcx, %r9
+ mulxq -40(%rsp), %rsi, %rdi ## 8-byte Folded Reload
+ adcq %rax, %rsi
+ mulxq -16(%rsp), %rax, %rcx ## 8-byte Folded Reload
+ adcq %rdi, %rax
+ adcq -96(%rsp), %rcx ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq -112(%rsp), %r11 ## 8-byte Folded Reload
+ adcq %r12, %r8
+ adcq %r15, %rbp
+ adcq %rbx, %r9
+ adcq %r10, %rsi
+ adcq %r14, %rax
+ adcq %r13, %rcx
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq $0, -104(%rsp) ## 8-byte Folded Spill
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ movq 32(%rdx), %rdx
+ mulxq -64(%rsp), %rbx, %rdi ## 8-byte Folded Reload
+ movq %rbx, -112(%rsp) ## 8-byte Spill
+ movq %rdi, -120(%rsp) ## 8-byte Spill
+ mulxq -72(%rsp), %rdi, %r11 ## 8-byte Folded Reload
+ movq %rdi, -96(%rsp) ## 8-byte Spill
+ mulxq 32(%rsp), %r10, %r13 ## 8-byte Folded Reload
+ mulxq 16(%rsp), %rdi, %r15 ## 8-byte Folded Reload
+ mulxq 24(%rsp), %rbx, %r12 ## 8-byte Folded Reload
+ addq %rdi, %r12
+ adcq %r10, %r15
+ mulxq -32(%rsp), %r10, %rdi ## 8-byte Folded Reload
+ adcq %r13, %r10
+ mulxq -80(%rsp), %r13, %r14 ## 8-byte Folded Reload
+ adcq %rdi, %r13
+ adcq -96(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r11 ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r8, %rbx
+ movq %rbx, -96(%rsp) ## 8-byte Spill
+ adcq %rbp, %r12
+ adcq %r9, %r15
+ adcq %rsi, %r10
+ adcq %rax, %r13
+ adcq %rcx, %r14
+ adcq -128(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, -88(%rsp) ## 8-byte Spill
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rbx, %rdx
+ imulq 40(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq 8(%rsp), %rbp, %rsi ## 8-byte Folded Reload
+ mulxq (%rsp), %r9, %r11 ## 8-byte Folded Reload
+ addq %rbp, %r11
+ mulxq -48(%rsp), %rbp, %r8 ## 8-byte Folded Reload
+ adcq %rsi, %rbp
+ adcq %rcx, %r8
+ mulxq -40(%rsp), %rsi, %rdi ## 8-byte Folded Reload
+ adcq %rax, %rsi
+ mulxq -16(%rsp), %rax, %rcx ## 8-byte Folded Reload
+ adcq %rdi, %rax
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq -96(%rsp), %r9 ## 8-byte Folded Reload
+ adcq %r12, %r11
+ adcq %r15, %rbp
+ adcq %r10, %r8
+ adcq %r13, %rsi
+ adcq %r14, %rax
+ adcq -88(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ adcq $0, -112(%rsp) ## 8-byte Folded Spill
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ movq 40(%rdx), %rdx
+ mulxq -64(%rsp), %rbx, %rdi ## 8-byte Folded Reload
+ movq %rbx, -96(%rsp) ## 8-byte Spill
+ movq %rdi, -120(%rsp) ## 8-byte Spill
+ mulxq -72(%rsp), %rbx, %rdi ## 8-byte Folded Reload
+ movq %rbx, -88(%rsp) ## 8-byte Spill
+ movq %rdi, -128(%rsp) ## 8-byte Spill
+ mulxq 32(%rsp), %rbx, %r10 ## 8-byte Folded Reload
+ mulxq 16(%rsp), %rdi, %r13 ## 8-byte Folded Reload
+ mulxq 24(%rsp), %r9, %r12 ## 8-byte Folded Reload
+ addq %rdi, %r12
+ adcq %rbx, %r13
+ mulxq -32(%rsp), %r15, %rdi ## 8-byte Folded Reload
+ adcq %r10, %r15
+ mulxq -80(%rsp), %r10, %r14 ## 8-byte Folded Reload
+ adcq %rdi, %r10
+ adcq -88(%rsp), %r14 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdi ## 8-byte Reload
+ adcq -96(%rsp), %rdi ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r11, %r9
+ movq %r9, -96(%rsp) ## 8-byte Spill
+ adcq %rbp, %r12
+ adcq %r8, %r13
+ adcq %rsi, %r15
+ adcq %rax, %r10
+ adcq %rcx, %r14
+ adcq -104(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -128(%rsp) ## 8-byte Spill
+ adcq -112(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %r9, %rdx
+ imulq 40(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rcx, -88(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulxq -8(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq 8(%rsp), %rdi, %rsi ## 8-byte Folded Reload
+ mulxq (%rsp), %r11, %rbx ## 8-byte Folded Reload
+ addq %rdi, %rbx
+ mulxq -48(%rsp), %r8, %r9 ## 8-byte Folded Reload
+ adcq %rsi, %r8
+ adcq %rcx, %r9
+ mulxq -40(%rsp), %rdi, %rbp ## 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq -16(%rsp), %rcx, %rsi ## 8-byte Folded Reload
+ adcq %rbp, %rcx
+ adcq -88(%rsp), %rsi ## 8-byte Folded Reload
+ movq -104(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq -96(%rsp), %r11 ## 8-byte Folded Reload
+ adcq %r12, %rbx
+ adcq %r13, %r8
+ adcq %r15, %r9
+ adcq %r10, %rdi
+ adcq %r14, %rcx
+ adcq -128(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -120(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq -112(%rsp), %r12 ## 8-byte Reload
+ adcq $0, %r12
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ mulxq -64(%rsp), %rbp, %rax ## 8-byte Folded Reload
+ movq %rbp, -120(%rsp) ## 8-byte Spill
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulxq -72(%rsp), %rbp, %rax ## 8-byte Folded Reload
+ movq %rbp, -128(%rsp) ## 8-byte Spill
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulxq -80(%rsp), %rbp, %rax ## 8-byte Folded Reload
+ movq %rbp, -112(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %r13, %rbp ## 8-byte Folded Reload
+ mulxq 32(%rsp), %r14, %r15 ## 8-byte Folded Reload
+ mulxq 16(%rsp), %rax, %r11 ## 8-byte Folded Reload
+ mulxq 24(%rsp), %rdx, %r10 ## 8-byte Folded Reload
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ addq %rax, %r10
+ adcq %r14, %r11
+ adcq %r13, %r15
+ adcq -112(%rsp), %rbp ## 8-byte Folded Reload
+ movq -72(%rsp), %r14 ## 8-byte Reload
+ adcq -128(%rsp), %r14 ## 8-byte Folded Reload
+ movq -64(%rsp), %rdx ## 8-byte Reload
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq -56(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ movq -80(%rsp), %r13 ## 8-byte Reload
+ addq %rbx, %r13
+ movq %r13, -80(%rsp) ## 8-byte Spill
+ adcq %r8, %r10
+ adcq %r9, %r11
+ adcq %rdi, %r15
+ adcq %rcx, %rbp
+ movq %rbp, -32(%rsp) ## 8-byte Spill
+ adcq %rsi, %r14
+ movq %r14, -72(%rsp) ## 8-byte Spill
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ adcq %r12, %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ sbbq %rdi, %rdi
+ movq 40(%rsp), %rdx ## 8-byte Reload
+ imulq %r13, %rdx
+ mulxq -8(%rsp), %rbp, %rsi ## 8-byte Folded Reload
+ mulxq 8(%rsp), %rcx, %rbx ## 8-byte Folded Reload
+ mulxq (%rsp), %r13, %rax ## 8-byte Folded Reload
+ addq %rcx, %rax
+ mulxq -48(%rsp), %rcx, %r9 ## 8-byte Folded Reload
+ adcq %rbx, %rcx
+ adcq %rbp, %r9
+ mulxq -40(%rsp), %rbp, %rbx ## 8-byte Folded Reload
+ adcq %rsi, %rbp
+ mulxq -16(%rsp), %rsi, %r14 ## 8-byte Folded Reload
+ adcq %rbx, %rsi
+ mulxq -24(%rsp), %rdx, %rbx ## 8-byte Folded Reload
+ adcq %r14, %rdx
+ adcq $0, %rbx
+ andl $1, %edi
+ addq -80(%rsp), %r13 ## 8-byte Folded Reload
+ adcq %r10, %rax
+ adcq %r11, %rcx
+ adcq %r15, %r9
+ adcq -32(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -72(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -64(%rsp), %rdx ## 8-byte Folded Reload
+ adcq -56(%rsp), %rbx ## 8-byte Folded Reload
+ adcq $0, %rdi
+ movq %rax, %r8
+ subq (%rsp), %r8 ## 8-byte Folded Reload
+ movq %rcx, %r10
+ sbbq 8(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r9, %r11
+ sbbq -48(%rsp), %r11 ## 8-byte Folded Reload
+ movq %rbp, %r14
+ sbbq -8(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rsi, %r15
+ sbbq -40(%rsp), %r15 ## 8-byte Folded Reload
+ movq %rdx, %r12
+ sbbq -16(%rsp), %r12 ## 8-byte Folded Reload
+ movq %rbx, %r13
+ sbbq -24(%rsp), %r13 ## 8-byte Folded Reload
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %rbx, %r13
+ testb %dil, %dil
+ cmovneq %rax, %r8
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq %r8, (%rax)
+ cmovneq %rcx, %r10
+ movq %r10, 8(%rax)
+ cmovneq %r9, %r11
+ movq %r11, 16(%rax)
+ cmovneq %rbp, %r14
+ movq %r14, 24(%rax)
+ cmovneq %rsi, %r15
+ movq %r15, 32(%rax)
+ cmovneq %rdx, %r12
+ movq %r12, 40(%rax)
+ movq %r13, 48(%rax)
+ addq $56, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
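+## "NF" variant of the 7-limb Montgomery multiplication: the same interleaved
+## multiply-and-reduce rounds as above, but the final correction picks between the
+## result and result - p with sarq/cmovsq instead of a carry-based subtraction.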
+ .globl _mcl_fp_montNF7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montNF7Lbmi2: ## @mcl_fp_montNF7Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $40, %rsp
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rdi, 32(%rsp) ## 8-byte Spill
+ movq (%rsi), %rax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rdi
+ movq %rdi, -96(%rsp) ## 8-byte Spill
+ movq (%rdx), %rbp
+ movq %rdi, %rdx
+ mulxq %rbp, %rdi, %rbx
+ movq %rax, %rdx
+ mulxq %rbp, %r8, %r14
+ movq 16(%rsi), %rdx
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ addq %rdi, %r14
+ mulxq %rbp, %r15, %rax
+ adcq %rbx, %r15
+ movq 24(%rsi), %rdx
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ mulxq %rbp, %rbx, %rdi
+ adcq %rax, %rbx
+ movq 32(%rsi), %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ mulxq %rbp, %r11, %rax
+ adcq %rdi, %r11
+ movq 40(%rsi), %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ mulxq %rbp, %r9, %rdi
+ adcq %rax, %r9
+ movq 48(%rsi), %rdx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ mulxq %rbp, %r10, %rbp
+ adcq %rdi, %r10
+ adcq $0, %rbp
+ movq -8(%rcx), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %r8, %rdx
+ imulq %rax, %rdx
+ movq (%rcx), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulxq %rax, %rax, %rsi
+ movq %rsi, -128(%rsp) ## 8-byte Spill
+ addq %r8, %rax
+ movq 8(%rcx), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulxq %rax, %r8, %rsi
+ movq %rsi, -120(%rsp) ## 8-byte Spill
+ adcq %r14, %r8
+ movq 16(%rcx), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ mulxq %rax, %rsi, %r13
+ adcq %r15, %rsi
+ movq 24(%rcx), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ mulxq %rax, %r12, %rax
+ adcq %rbx, %r12
+ movq 32(%rcx), %rdi
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ mulxq %rdi, %r15, %rbx
+ adcq %r11, %r15
+ movq 40(%rcx), %rdi
+ movq %rdi, -16(%rsp) ## 8-byte Spill
+ mulxq %rdi, %r14, %rdi
+ adcq %r9, %r14
+ movq 48(%rcx), %rcx
+ movq %rcx, -56(%rsp) ## 8-byte Spill
+ mulxq %rcx, %r11, %rcx
+ adcq %r10, %r11
+ adcq $0, %rbp
+ addq -128(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, -128(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -120(%rsp) ## 8-byte Spill
+ adcq %r13, %r12
+ adcq %rax, %r15
+ adcq %rbx, %r14
+ adcq %rdi, %r11
+ adcq %rcx, %rbp
+ movq -88(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ mulxq -96(%rsp), %rcx, %rsi ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r13, %rax ## 8-byte Folded Reload
+ addq %rcx, %rax
+ mulxq -104(%rsp), %rcx, %rdi ## 8-byte Folded Reload
+ adcq %rsi, %rcx
+ mulxq -24(%rsp), %rsi, %r8 ## 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -32(%rsp), %rdi, %r9 ## 8-byte Folded Reload
+ adcq %r8, %rdi
+ mulxq -40(%rsp), %r8, %rbx ## 8-byte Folded Reload
+ adcq %r9, %r8
+ mulxq -48(%rsp), %r9, %r10 ## 8-byte Folded Reload
+ adcq %rbx, %r9
+ adcq $0, %r10
+ addq -128(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rax ## 8-byte Folded Reload
+ adcq %r12, %rcx
+ adcq %r15, %rsi
+ adcq %r14, %rdi
+ adcq %r11, %r8
+ adcq %rbp, %r9
+ adcq $0, %r10
+ movq %r13, %rdx
+ imulq -80(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rbp, %rbx ## 8-byte Folded Reload
+ movq %rbx, -128(%rsp) ## 8-byte Spill
+ addq %r13, %rbp
+ mulxq -72(%rsp), %rbp, %r14 ## 8-byte Folded Reload
+ adcq %rax, %rbp
+ mulxq 8(%rsp), %rax, %r11 ## 8-byte Folded Reload
+ adcq %rcx, %rax
+ mulxq (%rsp), %r12, %rcx ## 8-byte Folded Reload
+ adcq %rsi, %r12
+ mulxq -8(%rsp), %r15, %rbx ## 8-byte Folded Reload
+ adcq %rdi, %r15
+ mulxq -16(%rsp), %r13, %rdi ## 8-byte Folded Reload
+ adcq %r8, %r13
+ mulxq -56(%rsp), %rsi, %rdx ## 8-byte Folded Reload
+ adcq %r9, %rsi
+ adcq $0, %r10
+ addq -128(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, -128(%rsp) ## 8-byte Spill
+ adcq %r14, %rax
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ adcq %r11, %r12
+ adcq %rcx, %r15
+ adcq %rbx, %r13
+ adcq %rdi, %rsi
+ adcq %rdx, %r10
+ movq -88(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ mulxq -96(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r14, %rdi ## 8-byte Folded Reload
+ addq %rcx, %rdi
+ mulxq -104(%rsp), %rbp, %rcx ## 8-byte Folded Reload
+ adcq %rax, %rbp
+ mulxq -24(%rsp), %rbx, %r8 ## 8-byte Folded Reload
+ adcq %rcx, %rbx
+ mulxq -32(%rsp), %rax, %r9 ## 8-byte Folded Reload
+ adcq %r8, %rax
+ mulxq -40(%rsp), %r8, %rcx ## 8-byte Folded Reload
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq %r9, %r8
+ mulxq -48(%rsp), %r9, %r11 ## 8-byte Folded Reload
+ adcq 16(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r11
+ addq -128(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rdi ## 8-byte Folded Reload
+ adcq %r12, %rbp
+ adcq %r15, %rbx
+ adcq %r13, %rax
+ adcq %rsi, %r8
+ adcq %r10, %r9
+ adcq $0, %r11
+ movq %r14, %rdx
+ imulq -80(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rsi, %rcx ## 8-byte Folded Reload
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ addq %r14, %rsi
+ mulxq -72(%rsp), %rsi, %r13 ## 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq 8(%rsp), %rdi, %r15 ## 8-byte Folded Reload
+ adcq %rbp, %rdi
+ mulxq (%rsp), %rcx, %rbp ## 8-byte Folded Reload
+ adcq %rbx, %rcx
+ mulxq -8(%rsp), %r14, %rbx ## 8-byte Folded Reload
+ adcq %rax, %r14
+ mulxq -16(%rsp), %r12, %rax ## 8-byte Folded Reload
+ adcq %r8, %r12
+ mulxq -56(%rsp), %r10, %rdx ## 8-byte Folded Reload
+ adcq %r9, %r10
+ adcq $0, %r11
+ addq -128(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -128(%rsp) ## 8-byte Spill
+ adcq %r13, %rdi
+ movq %rdi, -120(%rsp) ## 8-byte Spill
+ adcq %r15, %rcx
+ adcq %rbp, %r14
+ adcq %rbx, %r12
+ adcq %rax, %r10
+ adcq %rdx, %r11
+ movq -88(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ mulxq -96(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r15, %rbp ## 8-byte Folded Reload
+ addq %rsi, %rbp
+ mulxq -104(%rsp), %rbx, %rdi ## 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -24(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -32(%rsp), %rdi, %r9 ## 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq -40(%rsp), %r8, %rax ## 8-byte Folded Reload
+ adcq %r9, %r8
+ mulxq -48(%rsp), %r9, %r13 ## 8-byte Folded Reload
+ adcq %rax, %r9
+ adcq $0, %r13
+ addq -128(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rbp ## 8-byte Folded Reload
+ adcq %rcx, %rbx
+ adcq %r14, %rsi
+ adcq %r12, %rdi
+ adcq %r10, %r8
+ adcq %r11, %r9
+ adcq $0, %r13
+ movq %r15, %rdx
+ imulq -80(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ addq %r15, %rcx
+ mulxq -72(%rsp), %rcx, %r11 ## 8-byte Folded Reload
+ adcq %rbp, %rcx
+ mulxq 8(%rsp), %rbp, %r10 ## 8-byte Folded Reload
+ adcq %rbx, %rbp
+ mulxq (%rsp), %rax, %rbx ## 8-byte Folded Reload
+ adcq %rsi, %rax
+ mulxq -8(%rsp), %r14, %rsi ## 8-byte Folded Reload
+ adcq %rdi, %r14
+ mulxq -16(%rsp), %r15, %rdi ## 8-byte Folded Reload
+ adcq %r8, %r15
+ mulxq -56(%rsp), %r12, %rdx ## 8-byte Folded Reload
+ adcq %r9, %r12
+ adcq $0, %r13
+ addq -128(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r11, %rbp
+ movq %rbp, -128(%rsp) ## 8-byte Spill
+ adcq %r10, %rax
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ adcq %rbx, %r14
+ adcq %rsi, %r15
+ adcq %rdi, %r12
+ adcq %rdx, %r13
+ movq -88(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ mulxq -96(%rsp), %rsi, %rdi ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r11, %r8 ## 8-byte Folded Reload
+ addq %rsi, %r8
+ mulxq -104(%rsp), %rbx, %rsi ## 8-byte Folded Reload
+ adcq %rdi, %rbx
+ mulxq -24(%rsp), %rbp, %rdi ## 8-byte Folded Reload
+ adcq %rsi, %rbp
+ mulxq -32(%rsp), %rsi, %r9 ## 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -40(%rsp), %rdi, %rax ## 8-byte Folded Reload
+ adcq %r9, %rdi
+ mulxq -48(%rsp), %r9, %r10 ## 8-byte Folded Reload
+ adcq %rax, %r9
+ adcq $0, %r10
+ addq %rcx, %r11
+ adcq -128(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %r14, %rbp
+ adcq %r15, %rsi
+ adcq %r12, %rdi
+ adcq %r13, %r9
+ adcq $0, %r10
+ movq %r11, %rdx
+ imulq -80(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ addq %r11, %rcx
+ mulxq -72(%rsp), %rcx, %r13 ## 8-byte Folded Reload
+ adcq %r8, %rcx
+ mulxq 8(%rsp), %rax, %r8 ## 8-byte Folded Reload
+ adcq %rbx, %rax
+ mulxq (%rsp), %rbx, %r11 ## 8-byte Folded Reload
+ adcq %rbp, %rbx
+ mulxq -8(%rsp), %r14, %rbp ## 8-byte Folded Reload
+ adcq %rsi, %r14
+ mulxq -16(%rsp), %r15, %rsi ## 8-byte Folded Reload
+ adcq %rdi, %r15
+ mulxq -56(%rsp), %r12, %rdx ## 8-byte Folded Reload
+ adcq %r9, %r12
+ adcq $0, %r10
+ addq -128(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r13, %rax
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ adcq %r8, %rbx
+ movq %rbx, -120(%rsp) ## 8-byte Spill
+ adcq %r11, %r14
+ adcq %rbp, %r15
+ adcq %rsi, %r12
+ adcq %rdx, %r10
+ movq -88(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ mulxq -96(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r11, %rbp ## 8-byte Folded Reload
+ addq %rsi, %rbp
+ mulxq -104(%rsp), %rbx, %rdi ## 8-byte Folded Reload
+ adcq %rax, %rbx
+ mulxq -24(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ adcq %rdi, %rsi
+ mulxq -32(%rsp), %rdi, %r9 ## 8-byte Folded Reload
+ adcq %rax, %rdi
+ mulxq -40(%rsp), %r8, %rax ## 8-byte Folded Reload
+ adcq %r9, %r8
+ mulxq -48(%rsp), %r9, %r13 ## 8-byte Folded Reload
+ adcq %rax, %r9
+ adcq $0, %r13
+ addq %rcx, %r11
+ adcq -128(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -120(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %r14, %rsi
+ adcq %r15, %rdi
+ adcq %r12, %r8
+ adcq %r10, %r9
+ adcq $0, %r13
+ movq %r11, %rdx
+ imulq -80(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -64(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ addq %r11, %rcx
+ mulxq -72(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ adcq %rbp, %rcx
+ mulxq 8(%rsp), %rax, %rbp ## 8-byte Folded Reload
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ adcq %rbx, %rax
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ mulxq (%rsp), %r14, %rbp ## 8-byte Folded Reload
+ adcq %rsi, %r14
+ mulxq -8(%rsp), %r11, %r12 ## 8-byte Folded Reload
+ adcq %rdi, %r11
+ mulxq -16(%rsp), %r10, %rbx ## 8-byte Folded Reload
+ adcq %r8, %r10
+ mulxq -56(%rsp), %rdi, %rax ## 8-byte Folded Reload
+ adcq %r9, %rdi
+ adcq $0, %r13
+ addq -120(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ movq -128(%rsp), %rcx ## 8-byte Reload
+ adcq 16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %r14 ## 8-byte Folded Reload
+ adcq %rbp, %r11
+ adcq %r12, %r10
+ adcq %rbx, %rdi
+ adcq %rax, %r13
+ movq -88(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ mulxq -96(%rsp), %rbp, %r9 ## 8-byte Folded Reload
+ mulxq -112(%rsp), %r8, %rax ## 8-byte Folded Reload
+ addq %rbp, %rax
+ mulxq -104(%rsp), %rbx, %rcx ## 8-byte Folded Reload
+ adcq %r9, %rbx
+ mulxq -24(%rsp), %rbp, %r9 ## 8-byte Folded Reload
+ adcq %rcx, %rbp
+ mulxq -32(%rsp), %rcx, %r12 ## 8-byte Folded Reload
+ adcq %r9, %rcx
+ mulxq -40(%rsp), %r15, %rsi ## 8-byte Folded Reload
+ movq %rsi, -112(%rsp) ## 8-byte Spill
+ adcq %r12, %r15
+ mulxq -48(%rsp), %r12, %r9 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r12 ## 8-byte Folded Reload
+ adcq $0, %r9
+ addq -120(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ adcq %r14, %rbx
+ adcq %r11, %rbp
+ adcq %r10, %rcx
+ adcq %rdi, %r15
+ adcq %r13, %r12
+ adcq $0, %r9
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ imulq %r8, %rdx
+ mulxq -64(%rsp), %rdi, %rsi ## 8-byte Folded Reload
+ movq %rsi, -80(%rsp) ## 8-byte Spill
+ addq %r8, %rdi
+ mulxq -72(%rsp), %r8, %rsi ## 8-byte Folded Reload
+ movq %rsi, -112(%rsp) ## 8-byte Spill
+ adcq %rax, %r8
+ movq 8(%rsp), %r11 ## 8-byte Reload
+ mulxq %r11, %rsi, %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ adcq %rbx, %rsi
+ movq (%rsp), %r14 ## 8-byte Reload
+ mulxq %r14, %rdi, %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ adcq %rbp, %rdi
+ movq -8(%rsp), %rbp ## 8-byte Reload
+ mulxq %rbp, %rax, %rbx
+ movq %rbx, -104(%rsp) ## 8-byte Spill
+ adcq %rcx, %rax
+ movq -16(%rsp), %rbx ## 8-byte Reload
+ mulxq %rbx, %rcx, %r13
+ adcq %r15, %rcx
+ mulxq -56(%rsp), %rdx, %r15 ## 8-byte Folded Reload
+ adcq %r12, %rdx
+ adcq $0, %r9
+ addq -80(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -88(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ adcq -104(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r13, %rdx
+ adcq %r15, %r9
+ movq %r8, %r13
+ subq -64(%rsp), %r13 ## 8-byte Folded Reload
+ movq %rsi, %r12
+ sbbq -72(%rsp), %r12 ## 8-byte Folded Reload
+ movq %rdi, %r10
+ sbbq %r11, %r10
+ movq %rax, %r11
+ sbbq %r14, %r11
+ movq %rcx, %r14
+ sbbq %rbp, %r14
+ movq %rdx, %r15
+ sbbq %rbx, %r15
+ movq %r9, %rbp
+ sbbq -56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, %rbx
+ sarq $63, %rbx
+ cmovsq %r8, %r13
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ movq %r13, (%rbx)
+ cmovsq %rsi, %r12
+ movq %r12, 8(%rbx)
+ cmovsq %rdi, %r10
+ movq %r10, 16(%rbx)
+ cmovsq %rax, %r11
+ movq %r11, 24(%rbx)
+ cmovsq %rcx, %r14
+ movq %r14, 32(%rbx)
+ cmovsq %rdx, %r15
+ movq %r15, 40(%rbx)
+ cmovsq %r9, %rbp
+ movq %rbp, 48(%rbx)
+ addq $40, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
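+## 7-limb Montgomery reduction, z(%rdi) = x(%rsi) * R^-1 mod p(%rdx) for a 14-limb
+## input x: each round multiplies the current low limb by p' (taken from -8(p)), adds
+## that multiple of p to cancel the limb, and absorbs the next input word.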
+ .globl _mcl_fp_montRed7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montRed7Lbmi2: ## @mcl_fp_montRed7Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $56, %rsp
+ movq %rdx, %rcx
+ movq %rdi, 48(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq (%rsi), %r13
+ movq %r13, %rdx
+ imulq %rax, %rdx
+ movq 48(%rcx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ mulxq %rax, %rdi, %rax
+ movq %rdi, -64(%rsp) ## 8-byte Spill
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ mulxq %rax, %r10, %rax
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulxq %rax, %r14, %r8
+ movq 24(%rcx), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ mulxq %rax, %r12, %r15
+ movq 16(%rcx), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ mulxq %rax, %rbp, %rbx
+ movq (%rcx), %rdi
+ movq %rdi, -48(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ mulxq %rax, %rax, %r11
+ mulxq %rdi, %rdx, %r9
+ addq %rax, %r9
+ adcq %rbp, %r11
+ adcq %r12, %rbx
+ adcq %r14, %r15
+ adcq %r10, %r8
+ movq -128(%rsp), %rcx ## 8-byte Reload
+ adcq -64(%rsp), %rcx ## 8-byte Folded Reload
+ movq -120(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %r13, %rdx
+ adcq 8(%rsi), %r9
+ adcq 16(%rsi), %r11
+ adcq 24(%rsi), %rbx
+ adcq 32(%rsi), %r15
+ adcq 40(%rsi), %r8
+ movq %r8, -112(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %rcx
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ adcq 56(%rsi), %rax
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq 104(%rsi), %r8
+ movq 96(%rsi), %rdx
+ movq 88(%rsi), %rdi
+ movq 80(%rsi), %rbp
+ movq 72(%rsi), %rax
+ movq 64(%rsi), %rcx
+ adcq $0, %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ adcq $0, %rbp
+ movq %rbp, -56(%rsp) ## 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -80(%rsp) ## 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, -64(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq %r9, %rdx
+ imulq -72(%rsp), %rdx ## 8-byte Folded Reload
+ movq -16(%rsp), %r13 ## 8-byte Reload
+ mulxq %r13, %rcx, %rax
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ mulxq -24(%rsp), %rcx, %rax ## 8-byte Folded Reload
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %r14, %r12 ## 8-byte Folded Reload
+ mulxq 16(%rsp), %r8, %rax ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rsi, %r10 ## 8-byte Folded Reload
+ mulxq -8(%rsp), %rcx, %rdi ## 8-byte Folded Reload
+ mulxq -48(%rsp), %rdx, %rbp ## 8-byte Folded Reload
+ addq %rcx, %rbp
+ adcq %rsi, %rdi
+ adcq %r8, %r10
+ adcq %r14, %rax
+ movq %rax, %rcx
+ adcq 40(%rsp), %r12 ## 8-byte Folded Reload
+ movq -104(%rsp), %rsi ## 8-byte Reload
+ adcq 32(%rsp), %rsi ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %r9, %rdx
+ adcq %r11, %rbp
+ adcq %rbx, %rdi
+ adcq %r15, %r10
+ adcq -112(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -104(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ adcq $0, -88(%rsp) ## 8-byte Folded Spill
+ adcq $0, -56(%rsp) ## 8-byte Folded Spill
+ adcq $0, -80(%rsp) ## 8-byte Folded Spill
+ adcq $0, 24(%rsp) ## 8-byte Folded Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, (%rsp) ## 8-byte Folded Spill
+ movq %rbp, %rdx
+ imulq -72(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq %r13, %rcx, %rax
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq -24(%rsp), %r15 ## 8-byte Reload
+ mulxq %r15, %rcx, %rax
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %r11, %r13 ## 8-byte Folded Reload
+ mulxq 16(%rsp), %r9, %r14 ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rsi, %r8 ## 8-byte Folded Reload
+ mulxq -8(%rsp), %rax, %rbx ## 8-byte Folded Reload
+ mulxq -48(%rsp), %rdx, %rcx ## 8-byte Folded Reload
+ addq %rax, %rcx
+ adcq %rsi, %rbx
+ adcq %r9, %r8
+ adcq %r11, %r14
+ adcq 32(%rsp), %r13 ## 8-byte Folded Reload
+ movq -128(%rsp), %rsi ## 8-byte Reload
+ adcq 8(%rsp), %rsi ## 8-byte Folded Reload
+ movq -120(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rbp, %rdx
+ adcq %rdi, %rcx
+ adcq %r10, %rbx
+ adcq -112(%rsp), %r8 ## 8-byte Folded Reload
+ adcq %r12, %r14
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -128(%rsp) ## 8-byte Spill
+ adcq -88(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ adcq $0, -56(%rsp) ## 8-byte Folded Spill
+ adcq $0, -80(%rsp) ## 8-byte Folded Spill
+ adcq $0, 24(%rsp) ## 8-byte Folded Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, (%rsp) ## 8-byte Folded Spill
+ movq %rcx, %rdx
+ imulq -72(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -16(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ movq %rsi, -88(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ mulxq %r15, %rsi, %rax
+ movq %rsi, -112(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq -32(%rsp), %r15 ## 8-byte Reload
+ mulxq %r15, %rax, %r12
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ mulxq 16(%rsp), %r9, %rbp ## 8-byte Folded Reload
+ mulxq -40(%rsp), %rdi, %r10 ## 8-byte Folded Reload
+ mulxq -8(%rsp), %rsi, %r11 ## 8-byte Folded Reload
+ mulxq -48(%rsp), %rdx, %rax ## 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %rdi, %r11
+ adcq %r9, %r10
+ adcq 8(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -112(%rsp), %r12 ## 8-byte Folded Reload
+ movq -104(%rsp), %rdi ## 8-byte Reload
+ adcq -88(%rsp), %rdi ## 8-byte Folded Reload
+ movq -96(%rsp), %rsi ## 8-byte Reload
+ adcq $0, %rsi
+ addq %rcx, %rdx
+ adcq %rbx, %rax
+ adcq %r8, %r11
+ adcq %r14, %r10
+ adcq %r13, %rbp
+ adcq -128(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -104(%rsp) ## 8-byte Spill
+ adcq -56(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -96(%rsp) ## 8-byte Spill
+ adcq $0, -80(%rsp) ## 8-byte Folded Spill
+ adcq $0, 24(%rsp) ## 8-byte Folded Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, (%rsp) ## 8-byte Folded Spill
+ movq %rax, %rdx
+ imulq -72(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -16(%rsp), %rsi, %rcx ## 8-byte Folded Reload
+ movq %rsi, -128(%rsp) ## 8-byte Spill
+ movq %rcx, -56(%rsp) ## 8-byte Spill
+ mulxq -24(%rsp), %rsi, %rcx ## 8-byte Folded Reload
+ movq %rsi, -88(%rsp) ## 8-byte Spill
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ mulxq %r15, %rcx, %r13
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r15 ## 8-byte Reload
+ mulxq %r15, %r9, %r14
+ mulxq -40(%rsp), %rdi, %rbx ## 8-byte Folded Reload
+ mulxq -8(%rsp), %rsi, %r8 ## 8-byte Folded Reload
+ mulxq -48(%rsp), %rdx, %rcx ## 8-byte Folded Reload
+ addq %rsi, %rcx
+ adcq %rdi, %r8
+ adcq %r9, %rbx
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r13 ## 8-byte Folded Reload
+ movq -120(%rsp), %rdi ## 8-byte Reload
+ adcq -128(%rsp), %rdi ## 8-byte Folded Reload
+ movq -56(%rsp), %rsi ## 8-byte Reload
+ adcq $0, %rsi
+ addq %rax, %rdx
+ adcq %r11, %rcx
+ adcq %r10, %r8
+ adcq %rbp, %rbx
+ adcq %r12, %r14
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -120(%rsp) ## 8-byte Spill
+ adcq -80(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -56(%rsp) ## 8-byte Spill
+ adcq $0, 24(%rsp) ## 8-byte Folded Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, (%rsp) ## 8-byte Folded Spill
+ movq %rcx, %rdx
+ imulq -72(%rsp), %rdx ## 8-byte Folded Reload
+ mulxq -16(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ movq %rsi, -96(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ mulxq -24(%rsp), %rsi, %rax ## 8-byte Folded Reload
+ movq %rsi, -104(%rsp) ## 8-byte Spill
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ mulxq -32(%rsp), %rax, %r12 ## 8-byte Folded Reload
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %r15, %r11
+ mulxq %r11, %rax, %r15
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulxq -40(%rsp), %rdi, %rbp ## 8-byte Folded Reload
+ movq -8(%rsp), %r9 ## 8-byte Reload
+ mulxq %r9, %rax, %r10
+ mulxq -48(%rsp), %rdx, %rsi ## 8-byte Folded Reload
+ addq %rax, %rsi
+ adcq %rdi, %r10
+ adcq -112(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -88(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r12 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdi ## 8-byte Reload
+ adcq -96(%rsp), %rdi ## 8-byte Folded Reload
+ movq -80(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rcx, %rdx
+ adcq %r8, %rsi
+ adcq %rbx, %r10
+ adcq %r14, %rbp
+ adcq %r13, %r15
+ adcq -120(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -56(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -128(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, (%rsp) ## 8-byte Folded Spill
+ movq -72(%rsp), %rdx ## 8-byte Reload
+ imulq %rsi, %rdx
+ mulxq %r11, %rcx, %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulxq %r9, %rbx, %rdi
+ mulxq -48(%rsp), %r11, %r14 ## 8-byte Folded Reload
+ addq %rbx, %r14
+ mulxq -40(%rsp), %rbx, %r13 ## 8-byte Folded Reload
+ adcq %rdi, %rbx
+ adcq %rcx, %r13
+ mulxq -32(%rsp), %r8, %rdi ## 8-byte Folded Reload
+ adcq -72(%rsp), %r8 ## 8-byte Folded Reload
+ mulxq -24(%rsp), %rcx, %r9 ## 8-byte Folded Reload
+ adcq %rdi, %rcx
+ mulxq -16(%rsp), %rdx, %rdi ## 8-byte Folded Reload
+ adcq %r9, %rdx
+ adcq $0, %rdi
+ addq %rsi, %r11
+ adcq %r10, %r14
+ adcq %rbp, %rbx
+ adcq %r15, %r13
+ adcq %r12, %r8
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -80(%rsp), %rdx ## 8-byte Folded Reload
+ adcq -64(%rsp), %rdi ## 8-byte Folded Reload
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ movq %r14, %rsi
+ subq -48(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rbx, %rbp
+ sbbq -8(%rsp), %rbp ## 8-byte Folded Reload
+ movq %r13, %r9
+ sbbq -40(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r8, %r10
+ sbbq 16(%rsp), %r10 ## 8-byte Folded Reload
+ movq %rcx, %r11
+ sbbq -32(%rsp), %r11 ## 8-byte Folded Reload
+ movq %rdx, %r15
+ sbbq -24(%rsp), %r15 ## 8-byte Folded Reload
+ movq %rdi, %r12
+ sbbq -16(%rsp), %r12 ## 8-byte Folded Reload
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rdi, %r12
+ testb %al, %al
+ cmovneq %r14, %rsi
+ movq 48(%rsp), %rdi ## 8-byte Reload
+ movq %rsi, (%rdi)
+ cmovneq %rbx, %rbp
+ movq %rbp, 8(%rdi)
+ cmovneq %r13, %r9
+ movq %r9, 16(%rdi)
+ cmovneq %r8, %r10
+ movq %r10, 24(%rdi)
+ cmovneq %rcx, %r11
+ movq %r11, 32(%rdi)
+ cmovneq %rdx, %r15
+ movq %r15, 40(%rdi)
+ movq %r12, 48(%rdi)
+ addq $56, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addPre7Lbmi2: ## @mcl_fp_addPre7Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r8
+ movq 48(%rsi), %r14
+ movq 40(%rdx), %r9
+ movq 40(%rsi), %r15
+ movq 32(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r12
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rbx
+ adcq 16(%rsi), %r12
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r12, 16(%rdi)
+ adcq %r11, %rax
+ movq %rax, 24(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r9, %r15
+ movq %r15, 40(%rdi)
+ adcq %r8, %r14
+ movq %r14, 48(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_subPre7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subPre7Lbmi2: ## @mcl_fp_subPre7Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r8
+ movq 48(%rsi), %r10
+ movq 40(%rdx), %r9
+ movq 40(%rsi), %r15
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %r12
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %r12
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq 32(%rsi), %rdx
+ movq 24(%rsi), %rsi
+ movq %rbx, (%rdi)
+ movq %r12, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ sbbq %r14, %rdx
+ movq %rdx, 32(%rdi)
+ sbbq %r9, %r15
+ movq %r15, 40(%rdi)
+ sbbq %r8, %r10
+ movq %r10, 48(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_shr1_7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_shr1_7Lbmi2: ## @mcl_fp_shr1_7Lbmi2
+## BB#0:
+ movq 48(%rsi), %r8
+ movq 40(%rsi), %r9
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %rax
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rdx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rdx
+ movq %rdx, (%rdi)
+ shrdq $1, %rcx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rax, %rcx
+ movq %rcx, 16(%rdi)
+ shrdq $1, %r10, %rax
+ movq %rax, 24(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 32(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 40(%rdi)
+ shrq %r8
+ movq %r8, 48(%rdi)
+ retq
+
+ .globl _mcl_fp_add7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_add7Lbmi2: ## @mcl_fp_add7Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r14
+ movq 48(%rsi), %r8
+ movq 40(%rdx), %r15
+ movq 40(%rsi), %r9
+ movq 32(%rdx), %r12
+ movq 24(%rdx), %r13
+ movq 16(%rdx), %r10
+ movq (%rdx), %r11
+ movq 8(%rdx), %rdx
+ addq (%rsi), %r11
+ adcq 8(%rsi), %rdx
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rbx
+ adcq 16(%rsi), %r10
+ movq %r11, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ adcq %r13, %rax
+ movq %rax, 24(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r15, %r9
+ movq %r9, 40(%rdi)
+ adcq %r14, %r8
+ movq %r8, 48(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %r11
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r10
+ sbbq 24(%rcx), %rax
+ sbbq 32(%rcx), %rbx
+ sbbq 40(%rcx), %r9
+ sbbq 48(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB104_2
+## BB#1: ## %nocarry
+ movq %r11, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %rax, 24(%rdi)
+ movq %rbx, 32(%rdi)
+ movq %r9, 40(%rdi)
+ movq %r8, 48(%rdi)
+LBB104_2: ## %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_addNF7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addNF7Lbmi2: ## @mcl_fp_addNF7Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rbp
+ movq 32(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r14
+ movq (%rdx), %r12
+ movq 8(%rdx), %r15
+ addq (%rsi), %r12
+ adcq 8(%rsi), %r15
+ adcq 16(%rsi), %r14
+ adcq 24(%rsi), %r11
+ adcq 32(%rsi), %r10
+ adcq 40(%rsi), %rbp
+ movq %rbp, -8(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %r9
+ movq %r12, %rsi
+ subq (%rcx), %rsi
+ movq %r15, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r14, %rax
+ sbbq 16(%rcx), %rax
+ movq %r11, %rbx
+ sbbq 24(%rcx), %rbx
+ movq %r10, %r13
+ sbbq 32(%rcx), %r13
+ sbbq 40(%rcx), %rbp
+ movq %r9, %r8
+ sbbq 48(%rcx), %r8
+ movq %r8, %rcx
+ sarq $63, %rcx
+ cmovsq %r12, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r15, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r14, %rax
+ movq %rax, 16(%rdi)
+ cmovsq %r11, %rbx
+ movq %rbx, 24(%rdi)
+ cmovsq %r10, %r13
+ movq %r13, 32(%rdi)
+ cmovsq -8(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 40(%rdi)
+ cmovsq %r9, %r8
+ movq %r8, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_sub7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sub7Lbmi2: ## @mcl_fp_sub7Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r14
+ movq 48(%rsi), %r8
+ movq 40(%rdx), %r15
+ movq 40(%rsi), %r9
+ movq 32(%rdx), %r12
+ movq (%rsi), %rax
+ movq 8(%rsi), %r11
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r11
+ movq 16(%rsi), %r13
+ sbbq 16(%rdx), %r13
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %rsi
+ sbbq 24(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r11, 8(%rdi)
+ movq %r13, 16(%rdi)
+ movq %rsi, 24(%rdi)
+ sbbq %r12, %r10
+ movq %r10, 32(%rdi)
+ sbbq %r15, %r9
+ movq %r9, 40(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 48(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB106_2
+## BB#1: ## %carry
+ movq 48(%rcx), %r14
+ movq 40(%rcx), %r15
+ movq 32(%rcx), %r12
+ movq 24(%rcx), %rbx
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rbp
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r13, %rbp
+ movq %rbp, 16(%rdi)
+ adcq %rsi, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r10, %r12
+ movq %r12, 32(%rdi)
+ adcq %r9, %r15
+ movq %r15, 40(%rdi)
+ adcq %r8, %r14
+ movq %r14, 48(%rdi)
+LBB106_2: ## %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_subNF7Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subNF7Lbmi2: ## @mcl_fp_subNF7Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 48(%rsi), %r11
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ movdqu 32(%rdx), %xmm2
+ pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1]
+ movd %xmm3, %r14
+ movdqu (%rsi), %xmm3
+ movdqu 16(%rsi), %xmm4
+ movdqu 32(%rsi), %xmm5
+ pshufd $78, %xmm5, %xmm6 ## xmm6 = xmm5[2,3,0,1]
+ movd %xmm6, %rcx
+ movd %xmm2, %r15
+ movd %xmm5, %r9
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r12
+ pshufd $78, %xmm4, %xmm2 ## xmm2 = xmm4[2,3,0,1]
+ movd %xmm2, %r10
+ movd %xmm1, %r13
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %rax
+ pshufd $78, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,0,1]
+ movd %xmm0, %rbx
+ movd %xmm3, %rsi
+ subq %rbx, %rsi
+ movd %xmm1, %rbx
+ sbbq %rax, %rbx
+ movd %xmm4, %rbp
+ sbbq %r13, %rbp
+ sbbq %r12, %r10
+ sbbq %r15, %r9
+ sbbq %r14, %rcx
+ movq %rcx, -8(%rsp) ## 8-byte Spill
+ sbbq 48(%rdx), %r11
+ movq %r11, %rax
+ sarq $63, %rax
+ movq %rax, %rdx
+ shldq $1, %r11, %rdx
+ andq (%r8), %rdx
+ movq 48(%r8), %r14
+ andq %rax, %r14
+ movq 40(%r8), %r15
+ andq %rax, %r15
+ movq 32(%r8), %r12
+ andq %rax, %r12
+ movq 24(%r8), %r13
+ andq %rax, %r13
+ movq 16(%r8), %rcx
+ andq %rax, %rcx
+ andq 8(%r8), %rax
+ addq %rsi, %rdx
+ adcq %rbx, %rax
+ movq %rdx, (%rdi)
+ movq %rax, 8(%rdi)
+ adcq %rbp, %rcx
+ movq %rcx, 16(%rdi)
+ adcq %r10, %r13
+ movq %r13, 24(%rdi)
+ adcq %r9, %r12
+ movq %r12, 32(%rdi)
+ adcq -8(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, 40(%rdi)
+ adcq %r11, %r14
+ movq %r14, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_add7Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_add7Lbmi2: ## @mcl_fpDbl_add7Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 104(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 96(%rdx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq 88(%rdx), %r11
+ movq 80(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r12
+ movq 16(%rdx), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %rbx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %r9
+ adcq 24(%rdx), %r15
+ adcq 32(%rdx), %r12
+ movq 72(%rdx), %r13
+ movq 64(%rdx), %rbp
+ movq %rax, (%rdi)
+ movq 56(%rdx), %r10
+ movq %rbx, 8(%rdi)
+ movq 48(%rdx), %rcx
+ movq 40(%rdx), %rdx
+ movq %r9, 16(%rdi)
+ movq 104(%rsi), %r9
+ movq %r15, 24(%rdi)
+ movq 40(%rsi), %rbx
+ adcq %rdx, %rbx
+ movq 96(%rsi), %r15
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %rdx
+ adcq %rcx, %rdx
+ movq 88(%rsi), %rax
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rcx
+ adcq %r10, %rcx
+ movq 80(%rsi), %r12
+ movq %rdx, 48(%rdi)
+ movq 72(%rsi), %rdx
+ movq 64(%rsi), %rsi
+ adcq %rbp, %rsi
+ adcq %r13, %rdx
+ adcq %r14, %r12
+ adcq %r11, %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ adcq -24(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, -24(%rsp) ## 8-byte Spill
+ adcq -8(%rsp), %r9 ## 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq %rcx, %rbx
+ subq (%r8), %rbx
+ movq %rsi, %r10
+ sbbq 8(%r8), %r10
+ movq %rdx, %r11
+ sbbq 16(%r8), %r11
+ movq %r12, %r14
+ sbbq 24(%r8), %r14
+ movq -16(%rsp), %r13 ## 8-byte Reload
+ sbbq 32(%r8), %r13
+ sbbq 40(%r8), %r15
+ movq %r9, %rax
+ sbbq 48(%r8), %rax
+ sbbq $0, %rbp
+ andl $1, %ebp
+ cmovneq %rcx, %rbx
+ movq %rbx, 56(%rdi)
+ testb %bpl, %bpl
+ cmovneq %rsi, %r10
+ movq %r10, 64(%rdi)
+ cmovneq %rdx, %r11
+ movq %r11, 72(%rdi)
+ cmovneq %r12, %r14
+ movq %r14, 80(%rdi)
+ cmovneq -16(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 88(%rdi)
+ cmovneq -24(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, 96(%rdi)
+ cmovneq %r9, %rax
+ movq %rax, 104(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sub7Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sub7Lbmi2: ## @mcl_fpDbl_sub7Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 104(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 96(%rdx), %r10
+ movq 88(%rdx), %r14
+ movq 16(%rsi), %rax
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %ecx, %ecx
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %rax
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 80(%rdx), %r13
+ movq 72(%rdx), %rbp
+ movq %r15, (%rdi)
+ movq 64(%rdx), %r9
+ movq %r11, 8(%rdi)
+ movq 56(%rdx), %r15
+ movq %rax, 16(%rdi)
+ movq 48(%rdx), %r11
+ movq 40(%rdx), %rdx
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %rdx, %rbx
+ movq 104(%rsi), %rax
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %r12
+ sbbq %r11, %r12
+ movq 96(%rsi), %r11
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rdx
+ sbbq %r15, %rdx
+ movq 88(%rsi), %r15
+ movq %r12, 48(%rdi)
+ movq 64(%rsi), %rbx
+ sbbq %r9, %rbx
+ movq 80(%rsi), %r12
+ movq 72(%rsi), %r9
+ sbbq %rbp, %r9
+ sbbq %r13, %r12
+ sbbq %r14, %r15
+ sbbq %r10, %r11
+ sbbq -8(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%r8), %r10
+ cmoveq %rcx, %r10
+ testb %bpl, %bpl
+ movq 16(%r8), %rbp
+ cmoveq %rcx, %rbp
+ movq 8(%r8), %rsi
+ cmoveq %rcx, %rsi
+ movq 48(%r8), %r14
+ cmoveq %rcx, %r14
+ movq 40(%r8), %r13
+ cmoveq %rcx, %r13
+ movq 32(%r8), %rax
+ cmoveq %rcx, %rax
+ cmovneq 24(%r8), %rcx
+ addq %rdx, %r10
+ adcq %rbx, %rsi
+ movq %r10, 56(%rdi)
+ movq %rsi, 64(%rdi)
+ adcq %r9, %rbp
+ movq %rbp, 72(%rdi)
+ adcq %r12, %rcx
+ movq %rcx, 80(%rdi)
+ adcq %r15, %rax
+ movq %rax, 88(%rdi)
+ adcq %r11, %r13
+ movq %r13, 96(%rdi)
+ adcq -8(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, 104(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .p2align 4, 0x90
+l_mulPv512x64: ## @mulPv512x64
+## BB#0:
+ mulxq (%rsi), %rcx, %rax
+ movq %rcx, (%rdi)
+ mulxq 8(%rsi), %rcx, %r8
+ addq %rax, %rcx
+ movq %rcx, 8(%rdi)
+ mulxq 16(%rsi), %rcx, %r9
+ adcq %r8, %rcx
+ movq %rcx, 16(%rdi)
+ mulxq 24(%rsi), %rax, %rcx
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ mulxq 32(%rsi), %rax, %r8
+ adcq %rcx, %rax
+ movq %rax, 32(%rdi)
+ mulxq 40(%rsi), %rcx, %r9
+ adcq %r8, %rcx
+ movq %rcx, 40(%rdi)
+ mulxq 48(%rsi), %rax, %rcx
+ adcq %r9, %rax
+ movq %rax, 48(%rdi)
+ mulxq 56(%rsi), %rax, %rdx
+ adcq %rcx, %rax
+ movq %rax, 56(%rdi)
+ adcq $0, %rdx
+ movq %rdx, 64(%rdi)
+ movq %rdi, %rax
+ retq
+
+ .globl _mcl_fp_mulUnitPre8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre8Lbmi2: ## @mcl_fp_mulUnitPre8Lbmi2
+## BB#0:
+ pushq %rbx
+ subq $80, %rsp
+ movq %rdi, %rbx
+ leaq 8(%rsp), %rdi
+ callq l_mulPv512x64
+ movq 72(%rsp), %r8
+ movq 64(%rsp), %r9
+ movq 56(%rsp), %r10
+ movq 48(%rsp), %r11
+ movq 40(%rsp), %rdi
+ movq 32(%rsp), %rax
+ movq 24(%rsp), %rcx
+ movq 8(%rsp), %rdx
+ movq 16(%rsp), %rsi
+ movq %rdx, (%rbx)
+ movq %rsi, 8(%rbx)
+ movq %rcx, 16(%rbx)
+ movq %rax, 24(%rbx)
+ movq %rdi, 32(%rbx)
+ movq %r11, 40(%rbx)
+ movq %r10, 48(%rbx)
+ movq %r9, 56(%rbx)
+ movq %r8, 64(%rbx)
+ addq $80, %rsp
+ popq %rbx
+ retq
+
+ .globl _mcl_fpDbl_mulPre8Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre8Lbmi2: ## @mcl_fpDbl_mulPre8Lbmi2
+## BB#0:
+ pushq %rbp
+ movq %rsp, %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $200, %rsp
+ movq %rdx, %r15
+ movq %rsi, %rbx
+ movq %rdi, %r14
+ callq _mcl_fpDbl_mulPre4Lbmi2
+ leaq 64(%r14), %rdi
+ leaq 32(%rbx), %rsi
+ leaq 32(%r15), %rdx
+ callq _mcl_fpDbl_mulPre4Lbmi2
+ movq 56(%rbx), %r10
+ movq 48(%rbx), %rdx
+ movq (%rbx), %rsi
+ movq 8(%rbx), %rdi
+ addq 32(%rbx), %rsi
+ adcq 40(%rbx), %rdi
+ adcq 16(%rbx), %rdx
+ adcq 24(%rbx), %r10
+ pushfq
+ popq %r8
+ xorl %r9d, %r9d
+ movq 56(%r15), %rcx
+ movq 48(%r15), %r13
+ movq (%r15), %r12
+ movq 8(%r15), %rbx
+ addq 32(%r15), %r12
+ adcq 40(%r15), %rbx
+ adcq 16(%r15), %r13
+ adcq 24(%r15), %rcx
+ movl $0, %eax
+ cmovbq %r10, %rax
+ movq %rax, -88(%rbp) ## 8-byte Spill
+ movl $0, %eax
+ cmovbq %rdx, %rax
+ movq %rax, -80(%rbp) ## 8-byte Spill
+ movl $0, %eax
+ cmovbq %rdi, %rax
+ movq %rax, -72(%rbp) ## 8-byte Spill
+ movl $0, %eax
+ cmovbq %rsi, %rax
+ movq %rax, -64(%rbp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq %rsi, -168(%rbp)
+ movq %rdi, -160(%rbp)
+ movq %rdx, -152(%rbp)
+ movq %r10, -144(%rbp)
+ movq %r12, -136(%rbp)
+ movq %rbx, -128(%rbp)
+ movq %r13, -120(%rbp)
+ movq %rcx, -112(%rbp)
+ pushq %r8
+ popfq
+ cmovaeq %r9, %rcx
+ movq %rcx, -48(%rbp) ## 8-byte Spill
+ cmovaeq %r9, %r13
+ cmovaeq %r9, %rbx
+ cmovaeq %r9, %r12
+ sbbq %rax, %rax
+ movq %rax, -56(%rbp) ## 8-byte Spill
+ leaq -232(%rbp), %rdi
+ leaq -168(%rbp), %rsi
+ leaq -136(%rbp), %rdx
+ callq _mcl_fpDbl_mulPre4Lbmi2
+ addq -64(%rbp), %r12 ## 8-byte Folded Reload
+ adcq -72(%rbp), %rbx ## 8-byte Folded Reload
+ adcq -80(%rbp), %r13 ## 8-byte Folded Reload
+ movq -48(%rbp), %r10 ## 8-byte Reload
+ adcq -88(%rbp), %r10 ## 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq -56(%rbp), %rdx ## 8-byte Reload
+ andl %edx, %r15d
+ andl $1, %r15d
+ addq -200(%rbp), %r12
+ adcq -192(%rbp), %rbx
+ adcq -184(%rbp), %r13
+ adcq -176(%rbp), %r10
+ adcq %rax, %r15
+ movq -208(%rbp), %rax
+ movq -216(%rbp), %rcx
+ movq -232(%rbp), %rsi
+ movq -224(%rbp), %rdx
+ subq (%r14), %rsi
+ sbbq 8(%r14), %rdx
+ sbbq 16(%r14), %rcx
+ sbbq 24(%r14), %rax
+ movq 32(%r14), %rdi
+ movq %rdi, -80(%rbp) ## 8-byte Spill
+ movq 40(%r14), %r8
+ movq %r8, -88(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r12
+ sbbq %r8, %rbx
+ movq 48(%r14), %rdi
+ movq %rdi, -72(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r13
+ movq 56(%r14), %rdi
+ movq %rdi, -64(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r10
+ sbbq $0, %r15
+ movq 64(%r14), %r11
+ subq %r11, %rsi
+ movq 72(%r14), %rdi
+ movq %rdi, -56(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rdx
+ movq 80(%r14), %rdi
+ movq %rdi, -48(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rcx
+ movq 88(%r14), %rdi
+ movq %rdi, -104(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rax
+ movq 96(%r14), %rdi
+ movq %rdi, -96(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r12
+ movq 104(%r14), %rdi
+ sbbq %rdi, %rbx
+ movq 112(%r14), %r8
+ sbbq %r8, %r13
+ movq 120(%r14), %r9
+ sbbq %r9, %r10
+ sbbq $0, %r15
+ addq -80(%rbp), %rsi ## 8-byte Folded Reload
+ adcq -88(%rbp), %rdx ## 8-byte Folded Reload
+ movq %rsi, 32(%r14)
+ adcq -72(%rbp), %rcx ## 8-byte Folded Reload
+ movq %rdx, 40(%r14)
+ adcq -64(%rbp), %rax ## 8-byte Folded Reload
+ movq %rcx, 48(%r14)
+ adcq %r11, %r12
+ movq %rax, 56(%r14)
+ movq %r12, 64(%r14)
+ adcq -56(%rbp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 72(%r14)
+ adcq -48(%rbp), %r13 ## 8-byte Folded Reload
+ movq %r13, 80(%r14)
+ adcq -104(%rbp), %r10 ## 8-byte Folded Reload
+ movq %r10, 88(%r14)
+ adcq -96(%rbp), %r15 ## 8-byte Folded Reload
+ movq %r15, 96(%r14)
+ adcq $0, %rdi
+ movq %rdi, 104(%r14)
+ adcq $0, %r8
+ movq %r8, 112(%r14)
+ adcq $0, %r9
+ movq %r9, 120(%r14)
+ addq $200, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sqrPre8Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre8Lbmi2: ## @mcl_fpDbl_sqrPre8Lbmi2
+## BB#0:
+ pushq %rbp
+ movq %rsp, %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $200, %rsp
+ movq %rsi, %rbx
+ movq %rdi, %r14
+ movq %rbx, %rdx
+ callq _mcl_fpDbl_mulPre4Lbmi2
+ leaq 64(%r14), %rdi
+ leaq 32(%rbx), %rsi
+ movq %rsi, %rdx
+ callq _mcl_fpDbl_mulPre4Lbmi2
+ movq 56(%rbx), %r15
+ movq 48(%rbx), %rax
+ movq (%rbx), %rcx
+ movq 8(%rbx), %rdx
+ addq 32(%rbx), %rcx
+ adcq 40(%rbx), %rdx
+ adcq 16(%rbx), %rax
+ adcq 24(%rbx), %r15
+ pushfq
+ popq %r8
+ pushfq
+ popq %r9
+ pushfq
+ popq %r10
+ pushfq
+ popq %rdi
+ pushfq
+ popq %rbx
+ sbbq %rsi, %rsi
+ movq %rsi, -56(%rbp) ## 8-byte Spill
+ leaq (%rcx,%rcx), %rsi
+ xorl %r11d, %r11d
+ pushq %rbx
+ popfq
+ cmovaeq %r11, %rsi
+ movq %rsi, -48(%rbp) ## 8-byte Spill
+ movq %rdx, %r13
+ shldq $1, %rcx, %r13
+ pushq %rdi
+ popfq
+ cmovaeq %r11, %r13
+ movq %rax, %r12
+ shldq $1, %rdx, %r12
+ pushq %r10
+ popfq
+ cmovaeq %r11, %r12
+ movq %r15, %rbx
+ movq %rcx, -168(%rbp)
+ movq %rdx, -160(%rbp)
+ movq %rax, -152(%rbp)
+ movq %r15, -144(%rbp)
+ movq %rcx, -136(%rbp)
+ movq %rdx, -128(%rbp)
+ movq %rax, -120(%rbp)
+ movq %r15, -112(%rbp)
+ shldq $1, %rax, %r15
+ pushq %r9
+ popfq
+ cmovaeq %r11, %r15
+ shrq $63, %rbx
+ pushq %r8
+ popfq
+ cmovaeq %r11, %rbx
+ leaq -232(%rbp), %rdi
+ leaq -168(%rbp), %rsi
+ leaq -136(%rbp), %rdx
+ callq _mcl_fpDbl_mulPre4Lbmi2
+ movq -56(%rbp), %rax ## 8-byte Reload
+ andl $1, %eax
+ movq -48(%rbp), %r10 ## 8-byte Reload
+ addq -200(%rbp), %r10
+ adcq -192(%rbp), %r13
+ adcq -184(%rbp), %r12
+ adcq -176(%rbp), %r15
+ adcq %rbx, %rax
+ movq %rax, %rbx
+ movq -208(%rbp), %rax
+ movq -216(%rbp), %rcx
+ movq -232(%rbp), %rsi
+ movq -224(%rbp), %rdx
+ subq (%r14), %rsi
+ sbbq 8(%r14), %rdx
+ sbbq 16(%r14), %rcx
+ sbbq 24(%r14), %rax
+ movq 32(%r14), %r9
+ movq %r9, -56(%rbp) ## 8-byte Spill
+ movq 40(%r14), %r8
+ movq %r8, -48(%rbp) ## 8-byte Spill
+ sbbq %r9, %r10
+ sbbq %r8, %r13
+ movq 48(%r14), %rdi
+ movq %rdi, -104(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r12
+ movq 56(%r14), %rdi
+ movq %rdi, -96(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r15
+ sbbq $0, %rbx
+ movq 64(%r14), %r11
+ subq %r11, %rsi
+ movq 72(%r14), %rdi
+ movq %rdi, -88(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rdx
+ movq 80(%r14), %rdi
+ movq %rdi, -80(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rcx
+ movq 88(%r14), %rdi
+ movq %rdi, -72(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rax
+ movq 96(%r14), %rdi
+ movq %rdi, -64(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r10
+ movq 104(%r14), %rdi
+ sbbq %rdi, %r13
+ movq 112(%r14), %r8
+ sbbq %r8, %r12
+ movq 120(%r14), %r9
+ sbbq %r9, %r15
+ sbbq $0, %rbx
+ addq -56(%rbp), %rsi ## 8-byte Folded Reload
+ adcq -48(%rbp), %rdx ## 8-byte Folded Reload
+ movq %rsi, 32(%r14)
+ adcq -104(%rbp), %rcx ## 8-byte Folded Reload
+ movq %rdx, 40(%r14)
+ adcq -96(%rbp), %rax ## 8-byte Folded Reload
+ movq %rcx, 48(%r14)
+ adcq %r11, %r10
+ movq %rax, 56(%r14)
+ movq %r10, 64(%r14)
+ adcq -88(%rbp), %r13 ## 8-byte Folded Reload
+ movq %r13, 72(%r14)
+ adcq -80(%rbp), %r12 ## 8-byte Folded Reload
+ movq %r12, 80(%r14)
+ adcq -72(%rbp), %r15 ## 8-byte Folded Reload
+ movq %r15, 88(%r14)
+ movq %rbx, %rax
+ adcq -64(%rbp), %rax ## 8-byte Folded Reload
+ movq %rax, 96(%r14)
+ adcq $0, %rdi
+ movq %rdi, 104(%r14)
+ adcq $0, %r8
+ movq %r8, 112(%r14)
+ adcq $0, %r9
+ movq %r9, 120(%r14)
+ addq $200, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mont8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mont8Lbmi2: ## @mcl_fp_mont8Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1256, %rsp ## imm = 0x4E8
+ movq %rcx, %r13
+ movq %rdx, 64(%rsp) ## 8-byte Spill
+ movq %rsi, 72(%rsp) ## 8-byte Spill
+ movq %rdi, 96(%rsp) ## 8-byte Spill
+ movq -8(%r13), %rbx
+ movq %rbx, 80(%rsp) ## 8-byte Spill
+ movq %r13, 56(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1184(%rsp), %rdi
+ callq l_mulPv512x64
+ movq 1184(%rsp), %r15
+ movq 1192(%rsp), %r14
+ movq %r15, %rdx
+ imulq %rbx, %rdx
+ movq 1248(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 1240(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 1232(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 1224(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 1216(%rsp), %r12
+ movq 1208(%rsp), %rbx
+ movq 1200(%rsp), %rbp
+ leaq 1112(%rsp), %rdi
+ movq %r13, %rsi
+ callq l_mulPv512x64
+ addq 1112(%rsp), %r15
+ adcq 1120(%rsp), %r14
+ adcq 1128(%rsp), %rbp
+ movq %rbp, 88(%rsp) ## 8-byte Spill
+ adcq 1136(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 1144(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 1152(%rsp), %r13
+ movq (%rsp), %rbx ## 8-byte Reload
+ adcq 1160(%rsp), %rbx
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 1168(%rsp), %rbp
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 1176(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1040(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %r15d
+ addq 1040(%rsp), %r14
+ movq 88(%rsp), %rax ## 8-byte Reload
+ adcq 1048(%rsp), %rax
+ movq %rax, 88(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 1056(%rsp), %rax
+ movq %rax, %r12
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 1064(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ adcq 1072(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ adcq 1080(%rsp), %rbx
+ movq %rbx, (%rsp) ## 8-byte Spill
+ adcq 1088(%rsp), %rbp
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 1096(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 1104(%rsp), %r15
+ movq %r15, 48(%rsp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq %r14, %rdx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 968(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %r15d
+ addq 968(%rsp), %r14
+ movq 88(%rsp), %r13 ## 8-byte Reload
+ adcq 976(%rsp), %r13
+ adcq 984(%rsp), %r12
+ movq %r12, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 992(%rsp), %r14
+ movq 16(%rsp), %rbx ## 8-byte Reload
+ adcq 1000(%rsp), %rbx
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 1008(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 1016(%rsp), %rbp
+ movq %rbp, %r12
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 1024(%rsp), %rbp
+ movq 48(%rsp), %rax ## 8-byte Reload
+ adcq 1032(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ adcq $0, %r15
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 896(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %r13, %rcx
+ addq 896(%rsp), %rcx
+ movq 32(%rsp), %r13 ## 8-byte Reload
+ adcq 904(%rsp), %r13
+ adcq 912(%rsp), %r14
+ adcq 920(%rsp), %rbx
+ movq %rbx, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 928(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 936(%rsp), %r12
+ movq %r12, 40(%rsp) ## 8-byte Spill
+ adcq 944(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 952(%rsp), %r12
+ adcq 960(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq %rcx, %rdx
+ movq %rcx, %rbp
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 824(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %ebx
+ addq 824(%rsp), %rbp
+ adcq 832(%rsp), %r13
+ movq %r13, 32(%rsp) ## 8-byte Spill
+ adcq 840(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 848(%rsp), %r13
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 856(%rsp), %rbp
+ movq 40(%rsp), %r14 ## 8-byte Reload
+ adcq 864(%rsp), %r14
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 872(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 880(%rsp), %r12
+ adcq 888(%rsp), %r15
+ adcq $0, %rbx
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 752(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 32(%rsp), %rax ## 8-byte Reload
+ addq 752(%rsp), %rax
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 760(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ adcq 776(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 784(%rsp), %r14
+ movq %r14, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 792(%rsp), %rbp
+ adcq 800(%rsp), %r12
+ adcq 808(%rsp), %r15
+ adcq 816(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 680(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %r13, %rax
+ andl $1, %eax
+ addq 680(%rsp), %rbx
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 688(%rsp), %r14
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 696(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 704(%rsp), %r13
+ movq 40(%rsp), %rbx ## 8-byte Reload
+ adcq 712(%rsp), %rbx
+ adcq 720(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq %r12, %rbp
+ adcq 728(%rsp), %rbp
+ adcq 736(%rsp), %r15
+ movq 32(%rsp), %r12 ## 8-byte Reload
+ adcq 744(%rsp), %r12
+ adcq $0, %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 608(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %r14, %rax
+ addq 608(%rsp), %rax
+ movq 16(%rsp), %r14 ## 8-byte Reload
+ adcq 616(%rsp), %r14
+ adcq 624(%rsp), %r13
+ movq %r13, (%rsp) ## 8-byte Spill
+ adcq 632(%rsp), %rbx
+ movq %rbx, %r13
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 640(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 648(%rsp), %rbp
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 656(%rsp), %r15
+ adcq 664(%rsp), %r12
+ movq %r12, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 672(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ sbbq %rbp, %rbp
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 536(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %rbp, %rax
+ andl $1, %eax
+ addq 536(%rsp), %rbx
+ adcq 544(%rsp), %r14
+ movq %r14, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %rbx ## 8-byte Reload
+ adcq 552(%rsp), %rbx
+ adcq 560(%rsp), %r13
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 568(%rsp), %rbp
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 576(%rsp), %r12
+ adcq 584(%rsp), %r15
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 592(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 600(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 464(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 16(%rsp), %rax ## 8-byte Reload
+ addq 464(%rsp), %rax
+ adcq 472(%rsp), %rbx
+ adcq 480(%rsp), %r13
+ movq %r13, 40(%rsp) ## 8-byte Spill
+ adcq 488(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ adcq 496(%rsp), %r12
+ adcq 504(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r15 ## 8-byte Reload
+ adcq 512(%rsp), %r15
+ adcq 520(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 528(%rsp), %r14
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %rbp
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 392(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %r13, %rax
+ andl $1, %eax
+ addq 392(%rsp), %rbp
+ adcq 400(%rsp), %rbx
+ movq %rbx, (%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 408(%rsp), %rbp
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 416(%rsp), %rbx
+ adcq 424(%rsp), %r12
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 432(%rsp), %r13
+ adcq 440(%rsp), %r15
+ movq %r15, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r15 ## 8-byte Reload
+ adcq 448(%rsp), %r15
+ adcq 456(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 320(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq (%rsp), %rax ## 8-byte Reload
+ addq 320(%rsp), %rax
+ adcq 328(%rsp), %rbp
+ movq %rbp, 40(%rsp) ## 8-byte Spill
+ adcq 336(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ movq %r12, %rbp
+ adcq 344(%rsp), %rbp
+ adcq 352(%rsp), %r13
+ movq 32(%rsp), %r12 ## 8-byte Reload
+ adcq 360(%rsp), %r12
+ adcq 368(%rsp), %r15
+ movq %r15, 8(%rsp) ## 8-byte Spill
+ adcq 376(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 384(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 248(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %r15d
+ addq 248(%rsp), %rbx
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 256(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r14 ## 8-byte Reload
+ adcq 264(%rsp), %r14
+ adcq 272(%rsp), %rbp
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ movq %r13, %rbx
+ adcq 280(%rsp), %rbx
+ movq %r12, %rbp
+ adcq 288(%rsp), %rbp
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq 296(%rsp), %r13
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 312(%rsp), %r12
+ adcq $0, %r15
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 176(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 40(%rsp), %rax ## 8-byte Reload
+ addq 176(%rsp), %rax
+ adcq 184(%rsp), %r14
+ movq %r14, 24(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 192(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ adcq 200(%rsp), %rbx
+ movq %rbx, 16(%rsp) ## 8-byte Spill
+ adcq 208(%rsp), %rbp
+ adcq 216(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 224(%rsp), %r14
+ adcq 232(%rsp), %r12
+ adcq 240(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq 80(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %r13
+ leaq 104(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %ebx
+ addq 104(%rsp), %r13
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 112(%rsp), %rcx
+ movq 48(%rsp), %rdx ## 8-byte Reload
+ adcq 120(%rsp), %rdx
+ movq 16(%rsp), %rsi ## 8-byte Reload
+ adcq 128(%rsp), %rsi
+ movq %rbp, %rdi
+ adcq 136(%rsp), %rdi
+ movq %rdi, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r8 ## 8-byte Reload
+ adcq 144(%rsp), %r8
+ movq %r8, 8(%rsp) ## 8-byte Spill
+ movq %r14, %r9
+ adcq 152(%rsp), %r9
+ movq %r9, (%rsp) ## 8-byte Spill
+ adcq 160(%rsp), %r12
+ adcq 168(%rsp), %r15
+ adcq $0, %rbx
+ movq %rcx, %rax
+ movq %rcx, %r11
+ movq 56(%rsp), %rbp ## 8-byte Reload
+ subq (%rbp), %rax
+ movq %rdx, %rcx
+ movq %rdx, %r14
+ sbbq 8(%rbp), %rcx
+ movq %rsi, %rdx
+ movq %rsi, %r13
+ sbbq 16(%rbp), %rdx
+ movq %rdi, %rsi
+ sbbq 24(%rbp), %rsi
+ movq %r8, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %r9, %r10
+ sbbq 40(%rbp), %r10
+ movq %r12, %r8
+ sbbq 48(%rbp), %r8
+ movq %r15, %r9
+ sbbq 56(%rbp), %r9
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %r15, %r9
+ testb %bl, %bl
+ cmovneq %r11, %rax
+ movq 96(%rsp), %rbx ## 8-byte Reload
+ movq %rax, (%rbx)
+ cmovneq %r14, %rcx
+ movq %rcx, 8(%rbx)
+ cmovneq %r13, %rdx
+ movq %rdx, 16(%rbx)
+ cmovneq 32(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovneq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 32(%rbx)
+ cmovneq (%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 40(%rbx)
+ cmovneq %r12, %r8
+ movq %r8, 48(%rbx)
+ movq %r9, 56(%rbx)
+ addq $1256, %rsp ## imm = 0x4E8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montNF8Lbmi2: ## @mcl_fp_montNF8Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1240, %rsp ## imm = 0x4D8
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq %rdx, 48(%rsp) ## 8-byte Spill
+ movq %rsi, 56(%rsp) ## 8-byte Spill
+ movq %rdi, 80(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1168(%rsp), %rdi
+ callq l_mulPv512x64
+ movq 1168(%rsp), %r15
+ movq 1176(%rsp), %r12
+ movq %r15, %rdx
+ imulq %rbx, %rdx
+ movq 1232(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 1224(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 1216(%rsp), %r13
+ movq 1208(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 1200(%rsp), %r14
+ movq 1192(%rsp), %rbp
+ movq 1184(%rsp), %rbx
+ leaq 1096(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 1096(%rsp), %r15
+ adcq 1104(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq 1112(%rsp), %rbx
+ adcq 1120(%rsp), %rbp
+ adcq 1128(%rsp), %r14
+ movq %r14, %r12
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 1136(%rsp), %r14
+ adcq 1144(%rsp), %r13
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 1152(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 1160(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1024(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 1088(%rsp), %r15
+ movq 16(%rsp), %rax ## 8-byte Reload
+ addq 1024(%rsp), %rax
+ adcq 1032(%rsp), %rbx
+ movq %rbx, 72(%rsp) ## 8-byte Spill
+ movq %rbp, %rbx
+ adcq 1040(%rsp), %rbx
+ adcq 1048(%rsp), %r12
+ adcq 1056(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ movq %r13, %rbp
+ adcq 1064(%rsp), %rbp
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 1072(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r14 ## 8-byte Reload
+ adcq 1080(%rsp), %r14
+ adcq $0, %r15
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 952(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 952(%rsp), %r13
+ movq 72(%rsp), %rax ## 8-byte Reload
+ adcq 960(%rsp), %rax
+ movq %rax, 72(%rsp) ## 8-byte Spill
+ adcq 968(%rsp), %rbx
+ movq %rbx, 16(%rsp) ## 8-byte Spill
+ movq %r12, %rbx
+ adcq 976(%rsp), %rbx
+ movq 8(%rsp), %r12 ## 8-byte Reload
+ adcq 984(%rsp), %r12
+ adcq 992(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 1000(%rsp), %r13
+ movq %r14, %rbp
+ adcq 1008(%rsp), %rbp
+ adcq 1016(%rsp), %r15
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 880(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 944(%rsp), %r14
+ movq 72(%rsp), %rax ## 8-byte Reload
+ addq 880(%rsp), %rax
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 888(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq 896(%rsp), %rbx
+ adcq 904(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 912(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 920(%rsp), %r13
+ movq %r13, (%rsp) ## 8-byte Spill
+ adcq 928(%rsp), %rbp
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq 936(%rsp), %r15
+ adcq $0, %r14
+ movq %rax, %rdx
+ movq %rax, %rbp
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 808(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 808(%rsp), %rbp
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 816(%rsp), %r13
+ movq %rbx, %r12
+ adcq 824(%rsp), %r12
+ movq 8(%rsp), %rbx ## 8-byte Reload
+ adcq 832(%rsp), %rbx
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 840(%rsp), %rbp
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 848(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 856(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ adcq 864(%rsp), %r15
+ adcq 872(%rsp), %r14
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 736(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 800(%rsp), %rax
+ movq %r13, %rcx
+ addq 736(%rsp), %rcx
+ adcq 744(%rsp), %r12
+ movq %r12, 24(%rsp) ## 8-byte Spill
+ adcq 752(%rsp), %rbx
+ movq %rbx, 8(%rsp) ## 8-byte Spill
+ adcq 760(%rsp), %rbp
+ movq %rbp, %r13
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 768(%rsp), %rbp
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 776(%rsp), %rbx
+ adcq 784(%rsp), %r15
+ adcq 792(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq %rcx, %rdx
+ movq %rcx, %r12
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 664(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 664(%rsp), %r12
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 672(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 680(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ adcq 688(%rsp), %r13
+ adcq 696(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 704(%rsp), %rbx
+ adcq 712(%rsp), %r15
+ adcq 720(%rsp), %r14
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 728(%rsp), %r12
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 592(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 656(%rsp), %rcx
+ movq 24(%rsp), %rax ## 8-byte Reload
+ addq 592(%rsp), %rax
+ movq 8(%rsp), %rbp ## 8-byte Reload
+ adcq 600(%rsp), %rbp
+ adcq 608(%rsp), %r13
+ movq %r13, 24(%rsp) ## 8-byte Spill
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 616(%rsp), %r13
+ adcq 624(%rsp), %rbx
+ adcq 632(%rsp), %r15
+ adcq 640(%rsp), %r14
+ adcq 648(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 520(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 520(%rsp), %r12
+ adcq 528(%rsp), %rbp
+ movq %rbp, 8(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r12 ## 8-byte Reload
+ adcq 536(%rsp), %r12
+ movq %r13, %rbp
+ adcq 544(%rsp), %rbp
+ adcq 552(%rsp), %rbx
+ adcq 560(%rsp), %r15
+ adcq 568(%rsp), %r14
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 576(%rsp), %r13
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 584(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 448(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 512(%rsp), %rcx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ addq 448(%rsp), %rax
+ adcq 456(%rsp), %r12
+ movq %r12, 24(%rsp) ## 8-byte Spill
+ adcq 464(%rsp), %rbp
+ adcq 472(%rsp), %rbx
+ adcq 480(%rsp), %r15
+ adcq 488(%rsp), %r14
+ adcq 496(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 504(%rsp), %r13
+ adcq $0, %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 376(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 376(%rsp), %r12
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 384(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 392(%rsp), %rbp
+ adcq 400(%rsp), %rbx
+ adcq 408(%rsp), %r15
+ adcq 416(%rsp), %r14
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 424(%rsp), %r12
+ adcq 432(%rsp), %r13
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 440(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 304(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 368(%rsp), %rcx
+ movq 24(%rsp), %rax ## 8-byte Reload
+ addq 304(%rsp), %rax
+ adcq 312(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 320(%rsp), %rbx
+ adcq 328(%rsp), %r15
+ adcq 336(%rsp), %r14
+ adcq 344(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq 352(%rsp), %r13
+ movq 8(%rsp), %rbp ## 8-byte Reload
+ adcq 360(%rsp), %rbp
+ adcq $0, %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 232(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 232(%rsp), %r12
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 240(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 248(%rsp), %rbx
+ adcq 256(%rsp), %r15
+ adcq 264(%rsp), %r14
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 272(%rsp), %r12
+ adcq 280(%rsp), %r13
+ adcq 288(%rsp), %rbp
+ movq %rbp, 8(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rbp ## 8-byte Reload
+ adcq 296(%rsp), %rbp
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 160(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 224(%rsp), %rcx
+ movq (%rsp), %rax ## 8-byte Reload
+ addq 160(%rsp), %rax
+ adcq 168(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 176(%rsp), %r15
+ adcq 184(%rsp), %r14
+ adcq 192(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq 200(%rsp), %r13
+ movq 8(%rsp), %rbx ## 8-byte Reload
+ adcq 208(%rsp), %rbx
+ adcq 216(%rsp), %rbp
+ movq %rbp, %r12
+ adcq $0, %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbp
+ leaq 88(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 88(%rsp), %rbp
+ movq 32(%rsp), %r11 ## 8-byte Reload
+ adcq 96(%rsp), %r11
+ adcq 104(%rsp), %r15
+ adcq 112(%rsp), %r14
+ movq 16(%rsp), %rsi ## 8-byte Reload
+ adcq 120(%rsp), %rsi
+ movq %rsi, 16(%rsp) ## 8-byte Spill
+ adcq 128(%rsp), %r13
+ adcq 136(%rsp), %rbx
+ movq %rbx, 8(%rsp) ## 8-byte Spill
+ adcq 144(%rsp), %r12
+ movq (%rsp), %r8 ## 8-byte Reload
+ adcq 152(%rsp), %r8
+ movq %r11, %rax
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ subq (%rbp), %rax
+ movq %r15, %rcx
+ sbbq 8(%rbp), %rcx
+ movq %r14, %rdx
+ sbbq 16(%rbp), %rdx
+ sbbq 24(%rbp), %rsi
+ movq %r13, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %rbx, %r9
+ sbbq 40(%rbp), %r9
+ movq %r12, %r10
+ sbbq 48(%rbp), %r10
+ movq %rbp, %rbx
+ movq %r8, %rbp
+ sbbq 56(%rbx), %rbp
+ testq %rbp, %rbp
+ cmovsq %r11, %rax
+ movq 80(%rsp), %rbx ## 8-byte Reload
+ movq %rax, (%rbx)
+ cmovsq %r15, %rcx
+ movq %rcx, 8(%rbx)
+ cmovsq %r14, %rdx
+ movq %rdx, 16(%rbx)
+ cmovsq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovsq %r13, %rdi
+ movq %rdi, 32(%rbx)
+ cmovsq 8(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 40(%rbx)
+ cmovsq %r12, %r10
+ movq %r10, 48(%rbx)
+ cmovsq %r8, %rbp
+ movq %rbp, 56(%rbx)
+ addq $1240, %rsp ## imm = 0x4D8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montRed8Lbmi2: ## @mcl_fp_montRed8Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $776, %rsp ## imm = 0x308
+ movq %rdx, %rax
+ movq %rdi, 192(%rsp) ## 8-byte Spill
+ movq -8(%rax), %rcx
+ movq %rcx, 104(%rsp) ## 8-byte Spill
+ movq (%rsi), %r15
+ movq 8(%rsi), %rdx
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq %r15, %rdx
+ imulq %rcx, %rdx
+ movq 120(%rsi), %rcx
+ movq %rcx, 112(%rsp) ## 8-byte Spill
+ movq 112(%rsi), %rcx
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ movq 104(%rsi), %rcx
+ movq %rcx, 96(%rsp) ## 8-byte Spill
+ movq 96(%rsi), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq 88(%rsi), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ movq 80(%rsi), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 72(%rsi), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 64(%rsi), %r13
+ movq 56(%rsi), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 48(%rsi), %r14
+ movq 40(%rsi), %rcx
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %r12
+ movq 24(%rsi), %rbx
+ movq 16(%rsi), %rbp
+ movq %rax, %rcx
+ movq (%rcx), %rax
+ movq %rax, 136(%rsp) ## 8-byte Spill
+ movq 56(%rcx), %rax
+ movq %rax, 184(%rsp) ## 8-byte Spill
+ movq 48(%rcx), %rax
+ movq %rax, 176(%rsp) ## 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, 168(%rsp) ## 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, 160(%rsp) ## 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, 152(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, 144(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, 128(%rsp) ## 8-byte Spill
+ movq %rcx, %rsi
+ movq %rsi, 88(%rsp) ## 8-byte Spill
+ leaq 704(%rsp), %rdi
+ callq l_mulPv512x64
+ addq 704(%rsp), %r15
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 712(%rsp), %rcx
+ adcq 720(%rsp), %rbp
+ movq %rbp, 80(%rsp) ## 8-byte Spill
+ adcq 728(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 736(%rsp), %r12
+ movq %r12, 120(%rsp) ## 8-byte Spill
+ movq 72(%rsp), %rax ## 8-byte Reload
+ adcq 744(%rsp), %rax
+ movq %rax, 72(%rsp) ## 8-byte Spill
+ adcq 752(%rsp), %r14
+ movq %r14, %r12
+ movq 64(%rsp), %rax ## 8-byte Reload
+ adcq 760(%rsp), %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ adcq $0, 16(%rsp) ## 8-byte Folded Spill
+ movq 40(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 24(%rsp) ## 8-byte Folded Spill
+ adcq $0, 48(%rsp) ## 8-byte Folded Spill
+ adcq $0, 96(%rsp) ## 8-byte Folded Spill
+ movq 56(%rsp), %r13 ## 8-byte Reload
+ adcq $0, %r13
+ movq 112(%rsp), %r14 ## 8-byte Reload
+ adcq $0, %r14
+ sbbq %rbx, %rbx
+ movq %rcx, %rbp
+ movq %rbp, %rdx
+ imulq 104(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 632(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 632(%rsp), %rbp
+ movq 80(%rsp), %rsi ## 8-byte Reload
+ adcq 640(%rsp), %rsi
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 648(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq 120(%rsp), %rcx ## 8-byte Reload
+ adcq 656(%rsp), %rcx
+ movq %rcx, 120(%rsp) ## 8-byte Spill
+ movq 72(%rsp), %rcx ## 8-byte Reload
+ adcq 664(%rsp), %rcx
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ adcq 672(%rsp), %r12
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 680(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 688(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 696(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r15
+ movq %r15, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ movq 48(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 96(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r13
+ movq %r13, 56(%rsp) ## 8-byte Spill
+ adcq $0, %r14
+ movq %r14, 112(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ adcq $0, %rbp
+ movq %rsi, %rdx
+ movq %rsi, %r14
+ imulq 104(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 560(%rsp), %rdi
+ movq 88(%rsp), %r13 ## 8-byte Reload
+ movq %r13, %rsi
+ callq l_mulPv512x64
+ addq 560(%rsp), %r14
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 568(%rsp), %rcx
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 576(%rsp), %rax
+ movq %rax, 120(%rsp) ## 8-byte Spill
+ movq 72(%rsp), %rax ## 8-byte Reload
+ adcq 584(%rsp), %rax
+ movq %rax, 72(%rsp) ## 8-byte Spill
+ adcq 592(%rsp), %r12
+ movq %r12, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %r14 ## 8-byte Reload
+ adcq 600(%rsp), %r14
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 608(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 616(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 624(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq $0, %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r15
+ movq %r15, 48(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ movq 56(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 112(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rbp
+ movq %rbp, 80(%rsp) ## 8-byte Spill
+ movq %rcx, %rbp
+ movq %rbp, %rdx
+ movq 104(%rsp), %r12 ## 8-byte Reload
+ imulq %r12, %rdx
+ leaq 488(%rsp), %rdi
+ movq %r13, %rsi
+ callq l_mulPv512x64
+ addq 488(%rsp), %rbp
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 496(%rsp), %rax
+ movq 72(%rsp), %rbp ## 8-byte Reload
+ adcq 504(%rsp), %rbp
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 512(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ adcq 520(%rsp), %r14
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 528(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 536(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %r13 ## 8-byte Reload
+ adcq 544(%rsp), %r13
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 552(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, 48(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 96(%rsp) ## 8-byte Spill
+ movq %r15, %rbx
+ adcq $0, %rbx
+ adcq $0, 112(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq %r12, %rdx
+ leaq 416(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 416(%rsp), %r15
+ adcq 424(%rsp), %rbp
+ movq %rbp, %rax
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq %r14, %r12
+ adcq 440(%rsp), %r12
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 448(%rsp), %r14
+ movq 16(%rsp), %rbp ## 8-byte Reload
+ adcq 456(%rsp), %rbp
+ adcq 464(%rsp), %r13
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 472(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ adcq $0, 96(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ movq 112(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 104(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 344(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 344(%rsp), %rbx
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 352(%rsp), %rax
+ adcq 360(%rsp), %r12
+ movq %r12, 64(%rsp) ## 8-byte Spill
+ adcq 368(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ adcq 376(%rsp), %rbp
+ movq %rbp, 16(%rsp) ## 8-byte Spill
+ adcq 384(%rsp), %r13
+ movq %r13, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r13 ## 8-byte Reload
+ adcq 392(%rsp), %r13
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 400(%rsp), %r12
+ movq 96(%rsp), %r14 ## 8-byte Reload
+ adcq 408(%rsp), %r14
+ movq 56(%rsp), %rbp ## 8-byte Reload
+ adcq $0, %rbp
+ movq %r15, %rbx
+ adcq $0, %rbx
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq 104(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 272(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 272(%rsp), %r15
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 280(%rsp), %rcx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 288(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 296(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 312(%rsp), %r13
+ movq %r13, 24(%rsp) ## 8-byte Spill
+ adcq 320(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ adcq 328(%rsp), %r14
+ movq %r14, %r13
+ adcq 336(%rsp), %rbp
+ movq %rbp, %r12
+ adcq $0, %rbx
+ movq %rbx, %r14
+ movq 80(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ movq 104(%rsp), %rdx ## 8-byte Reload
+ movq %rcx, %rbx
+ imulq %rbx, %rdx
+ leaq 200(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 200(%rsp), %rbx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 208(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r8 ## 8-byte Reload
+ adcq 216(%rsp), %r8
+ movq %r8, 16(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rdx ## 8-byte Reload
+ adcq 224(%rsp), %rdx
+ movq 24(%rsp), %rsi ## 8-byte Reload
+ adcq 232(%rsp), %rsi
+ movq 48(%rsp), %rdi ## 8-byte Reload
+ adcq 240(%rsp), %rdi
+ movq %r13, %rbp
+ adcq 248(%rsp), %rbp
+ movq %r12, %rbx
+ adcq 256(%rsp), %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ movq %r14, %r9
+ adcq 264(%rsp), %r9
+ adcq $0, %r15
+ movq %r15, %r10
+ subq 136(%rsp), %rax ## 8-byte Folded Reload
+ movq %r8, %rcx
+ sbbq 128(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rdx, %r13
+ sbbq 144(%rsp), %r13 ## 8-byte Folded Reload
+ movq %rsi, %r12
+ sbbq 152(%rsp), %r12 ## 8-byte Folded Reload
+ movq %rdi, %r14
+ sbbq 160(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rbp, %r11
+ sbbq 168(%rsp), %r11 ## 8-byte Folded Reload
+ movq %rbx, %r8
+ sbbq 176(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r9, %r15
+ sbbq 184(%rsp), %r9 ## 8-byte Folded Reload
+ sbbq $0, %r10
+ andl $1, %r10d
+ cmovneq %r15, %r9
+ testb %r10b, %r10b
+ cmovneq 8(%rsp), %rax ## 8-byte Folded Reload
+ movq 192(%rsp), %rbx ## 8-byte Reload
+ movq %rax, (%rbx)
+ cmovneq 16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 8(%rbx)
+ cmovneq %rdx, %r13
+ movq %r13, 16(%rbx)
+ cmovneq %rsi, %r12
+ movq %r12, 24(%rbx)
+ cmovneq %rdi, %r14
+ movq %r14, 32(%rbx)
+ cmovneq %rbp, %r11
+ movq %r11, 40(%rbx)
+ cmovneq 56(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 48(%rbx)
+ movq %r9, 56(%rbx)
+ addq $776, %rsp ## imm = 0x308
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addPre8Lbmi2: ## @mcl_fp_addPre8Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 56(%rsi), %r15
+ movq 48(%rdx), %r9
+ movq 48(%rsi), %r12
+ movq 40(%rdx), %r10
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %r14
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rsi
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r14, %rax
+ movq %rax, 24(%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r10, %r13
+ movq %r13, 40(%rdi)
+ adcq %r9, %r12
+ movq %r12, 48(%rdi)
+ adcq %r8, %r15
+ movq %r15, 56(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_subPre8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subPre8Lbmi2: ## @mcl_fp_subPre8Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 56(%rsi), %r15
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %r12
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %r12
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq 48(%rsi), %r13
+ movq 40(%rsi), %rdx
+ movq 32(%rsi), %rbp
+ movq 24(%rsi), %rsi
+ movq %rbx, (%rdi)
+ movq %r12, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ sbbq %r14, %rbp
+ movq %rbp, 32(%rdi)
+ sbbq %r10, %rdx
+ movq %rdx, 40(%rdi)
+ sbbq %r9, %r13
+ movq %r13, 48(%rdi)
+ sbbq %r8, %r15
+ movq %r15, 56(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_shr1_8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_shr1_8Lbmi2: ## @mcl_fp_shr1_8Lbmi2
+## BB#0:
+ movq 56(%rsi), %r8
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r10
+ movq 32(%rsi), %r11
+ movq 24(%rsi), %rcx
+ movq 16(%rsi), %rdx
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rax
+ movq %rax, (%rdi)
+ shrdq $1, %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 16(%rdi)
+ shrdq $1, %r11, %rcx
+ movq %rcx, 24(%rdi)
+ shrdq $1, %r10, %r11
+ movq %r11, 32(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 40(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 48(%rdi)
+ shrq %r8
+ movq %r8, 56(%rdi)
+ retq
+
+ .globl _mcl_fp_add8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_add8Lbmi2: ## @mcl_fp_add8Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r15
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r12
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %r11
+ movq 32(%rsi), %r10
+ movq (%rdx), %r14
+ movq 8(%rdx), %rbx
+ addq (%rsi), %r14
+ adcq 8(%rsi), %rbx
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r11
+ movq 40(%rdx), %rsi
+ adcq 32(%rdx), %r10
+ movq %r14, (%rdi)
+ movq %rbx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r10, 32(%rdi)
+ adcq %r13, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r12, %r9
+ movq %r9, 48(%rdi)
+ adcq %r15, %r8
+ movq %r8, 56(%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %r14
+ sbbq 8(%rcx), %rbx
+ sbbq 16(%rcx), %rax
+ sbbq 24(%rcx), %r11
+ sbbq 32(%rcx), %r10
+ sbbq 40(%rcx), %rsi
+ sbbq 48(%rcx), %r9
+ sbbq 56(%rcx), %r8
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne LBB120_2
+## BB#1: ## %nocarry
+ movq %r14, (%rdi)
+ movq %rbx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r10, 32(%rdi)
+ movq %rsi, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %r8, 56(%rdi)
+LBB120_2: ## %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_addNF8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addNF8Lbmi2: ## @mcl_fp_addNF8Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 48(%rdx), %rbp
+ movq 40(%rdx), %rbx
+ movq 32(%rdx), %rax
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r15
+ movq (%rdx), %r13
+ movq 8(%rdx), %r12
+ addq (%rsi), %r13
+ adcq 8(%rsi), %r12
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %r11
+ adcq 32(%rsi), %rax
+ movq %rax, %r10
+ movq %r10, -24(%rsp) ## 8-byte Spill
+ adcq 40(%rsi), %rbx
+ movq %rbx, %r9
+ movq %r9, -16(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %rbp
+ movq %rbp, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ adcq 56(%rsi), %r8
+ movq %r13, %rsi
+ subq (%rcx), %rsi
+ movq %r12, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r15, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r11, %r14
+ sbbq 24(%rcx), %r14
+ movq %r10, %rbp
+ sbbq 32(%rcx), %rbp
+ movq %r9, %r10
+ sbbq 40(%rcx), %r10
+ movq %rax, %r9
+ sbbq 48(%rcx), %r9
+ movq %r8, %rax
+ sbbq 56(%rcx), %rax
+ testq %rax, %rax
+ cmovsq %r13, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r12, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r15, %rbx
+ movq %rbx, 16(%rdi)
+ cmovsq %r11, %r14
+ movq %r14, 24(%rdi)
+ cmovsq -24(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rdi)
+ cmovsq -16(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 40(%rdi)
+ cmovsq -8(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 48(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_sub8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sub8Lbmi2: ## @mcl_fp_sub8Lbmi2
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r12
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r13
+ movq (%rsi), %rax
+ movq 8(%rsi), %r10
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r10
+ movq 16(%rsi), %r11
+ sbbq 16(%rdx), %r11
+ movq 24(%rsi), %r15
+ sbbq 24(%rdx), %r15
+ movq 32(%rsi), %r14
+ sbbq 32(%rdx), %r14
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %rsi
+ sbbq 40(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r10, 8(%rdi)
+ movq %r11, 16(%rdi)
+ movq %r15, 24(%rdi)
+ movq %r14, 32(%rdi)
+ movq %rsi, 40(%rdi)
+ sbbq %r13, %r9
+ movq %r9, 48(%rdi)
+ sbbq %r12, %r8
+ movq %r8, 56(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB122_2
+## BB#1: ## %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ movq 8(%rcx), %rax
+ adcq %r10, %rax
+ movq %rax, 8(%rdi)
+ movq 16(%rcx), %rax
+ adcq %r11, %rax
+ movq %rax, 16(%rdi)
+ movq 24(%rcx), %rax
+ adcq %r15, %rax
+ movq %rax, 24(%rdi)
+ movq 32(%rcx), %rax
+ adcq %r14, %rax
+ movq %rax, 32(%rdi)
+ movq 40(%rcx), %rax
+ adcq %rsi, %rax
+ movq %rax, 40(%rdi)
+ movq 48(%rcx), %rax
+ adcq %r9, %rax
+ movq %rax, 48(%rdi)
+ movq 56(%rcx), %rax
+ adcq %r8, %rax
+ movq %rax, 56(%rdi)
+LBB122_2: ## %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_subNF8Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subNF8Lbmi2: ## @mcl_fp_subNF8Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq %rdi, %r9
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ movdqu 32(%rdx), %xmm2
+ movdqu 48(%rdx), %xmm3
+ pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1]
+ movd %xmm4, %r12
+ movdqu (%rsi), %xmm4
+ movdqu 16(%rsi), %xmm5
+ movdqu 32(%rsi), %xmm8
+ movdqu 48(%rsi), %xmm7
+ pshufd $78, %xmm7, %xmm6 ## xmm6 = xmm7[2,3,0,1]
+ movd %xmm6, %rcx
+ movd %xmm3, %r13
+ movd %xmm7, %rdi
+ pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1]
+ movd %xmm3, %rbp
+ pshufd $78, %xmm8, %xmm3 ## xmm3 = xmm8[2,3,0,1]
+ movd %xmm3, %rdx
+ movd %xmm2, %rsi
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r11
+ pshufd $78, %xmm5, %xmm2 ## xmm2 = xmm5[2,3,0,1]
+ movd %xmm1, %r15
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %rbx
+ pshufd $78, %xmm4, %xmm1 ## xmm1 = xmm4[2,3,0,1]
+ movd %xmm0, %rax
+ movd %xmm4, %r14
+ subq %rax, %r14
+ movd %xmm1, %r10
+ sbbq %rbx, %r10
+ movd %xmm5, %rbx
+ sbbq %r15, %rbx
+ movd %xmm2, %r15
+ sbbq %r11, %r15
+ movd %xmm8, %r11
+ sbbq %rsi, %r11
+ sbbq %rbp, %rdx
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ sbbq %r13, %rdi
+ movq %rdi, -16(%rsp) ## 8-byte Spill
+ sbbq %r12, %rcx
+ movq %rcx, -8(%rsp) ## 8-byte Spill
+ movq %rcx, %rbp
+ sarq $63, %rbp
+ movq 56(%r8), %r12
+ andq %rbp, %r12
+ movq 48(%r8), %r13
+ andq %rbp, %r13
+ movq 40(%r8), %rdi
+ andq %rbp, %rdi
+ movq 32(%r8), %rsi
+ andq %rbp, %rsi
+ movq 24(%r8), %rdx
+ andq %rbp, %rdx
+ movq 16(%r8), %rcx
+ andq %rbp, %rcx
+ movq 8(%r8), %rax
+ andq %rbp, %rax
+ andq (%r8), %rbp
+ addq %r14, %rbp
+ adcq %r10, %rax
+ movq %rbp, (%r9)
+ adcq %rbx, %rcx
+ movq %rax, 8(%r9)
+ movq %rcx, 16(%r9)
+ adcq %r15, %rdx
+ movq %rdx, 24(%r9)
+ adcq %r11, %rsi
+ movq %rsi, 32(%r9)
+ adcq -24(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 40(%r9)
+ adcq -16(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 48(%r9)
+ adcq -8(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, 56(%r9)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_add8Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_add8Lbmi2: ## @mcl_fpDbl_add8Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 120(%rdx), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq 112(%rdx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq 104(%rdx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq 96(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r11
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %r15
+ adcq 32(%rdx), %r11
+ movq 88(%rdx), %rbp
+ movq 80(%rdx), %r13
+ movq %rbx, (%rdi)
+ movq 72(%rdx), %r10
+ movq %rax, 8(%rdi)
+ movq 64(%rdx), %r9
+ movq %r12, 16(%rdi)
+ movq 40(%rdx), %r12
+ movq %r15, 24(%rdi)
+ movq 40(%rsi), %rbx
+ adcq %r12, %rbx
+ movq 56(%rdx), %r15
+ movq 48(%rdx), %r12
+ movq %r11, 32(%rdi)
+ movq 48(%rsi), %rdx
+ adcq %r12, %rdx
+ movq 120(%rsi), %r12
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rax
+ adcq %r15, %rax
+ movq 112(%rsi), %rcx
+ movq %rdx, 48(%rdi)
+ movq 64(%rsi), %rbx
+ adcq %r9, %rbx
+ movq 104(%rsi), %rdx
+ movq %rax, 56(%rdi)
+ movq 72(%rsi), %r9
+ adcq %r10, %r9
+ movq 80(%rsi), %r11
+ adcq %r13, %r11
+ movq 96(%rsi), %rax
+ movq 88(%rsi), %r15
+ adcq %rbp, %r15
+ adcq %r14, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rdx, %rax
+ adcq -24(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ adcq -16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -16(%rsp) ## 8-byte Spill
+ adcq -32(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, -32(%rsp) ## 8-byte Spill
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq %rbx, %rsi
+ subq (%r8), %rsi
+ movq %r9, %rdx
+ sbbq 8(%r8), %rdx
+ movq %r11, %r10
+ sbbq 16(%r8), %r10
+ movq %r15, %r14
+ sbbq 24(%r8), %r14
+ movq -8(%rsp), %r13 ## 8-byte Reload
+ sbbq 32(%r8), %r13
+ movq %rax, %r12
+ sbbq 40(%r8), %r12
+ movq %rcx, %rax
+ sbbq 48(%r8), %rax
+ movq -32(%rsp), %rcx ## 8-byte Reload
+ sbbq 56(%r8), %rcx
+ sbbq $0, %rbp
+ andl $1, %ebp
+ cmovneq %rbx, %rsi
+ movq %rsi, 64(%rdi)
+ testb %bpl, %bpl
+ cmovneq %r9, %rdx
+ movq %rdx, 72(%rdi)
+ cmovneq %r11, %r10
+ movq %r10, 80(%rdi)
+ cmovneq %r15, %r14
+ movq %r14, 88(%rdi)
+ cmovneq -8(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 96(%rdi)
+ cmovneq -24(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, 104(%rdi)
+ cmovneq -16(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 112(%rdi)
+ cmovneq -32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 120(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sub8Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sub8Lbmi2: ## @mcl_fpDbl_sub8Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r15
+ movq 120(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 112(%rdx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq 104(%rdx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r9
+ movq (%rsi), %r12
+ movq 8(%rsi), %r14
+ xorl %r8d, %r8d
+ subq (%rdx), %r12
+ sbbq 8(%rdx), %r14
+ sbbq 16(%rdx), %r9
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r13
+ sbbq 32(%rdx), %r13
+ movq 96(%rdx), %rbp
+ movq 88(%rdx), %r11
+ movq %r12, (%rdi)
+ movq 80(%rdx), %r12
+ movq %r14, 8(%rdi)
+ movq 72(%rdx), %r10
+ movq %r9, 16(%rdi)
+ movq 40(%rdx), %r9
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %r9, %rbx
+ movq 48(%rdx), %r9
+ movq %r13, 32(%rdi)
+ movq 48(%rsi), %r14
+ sbbq %r9, %r14
+ movq 64(%rdx), %r13
+ movq 56(%rdx), %r9
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rdx
+ sbbq %r9, %rdx
+ movq 120(%rsi), %rcx
+ movq %r14, 48(%rdi)
+ movq 64(%rsi), %rbx
+ sbbq %r13, %rbx
+ movq 112(%rsi), %rax
+ movq %rdx, 56(%rdi)
+ movq 72(%rsi), %r9
+ sbbq %r10, %r9
+ movq 80(%rsi), %r13
+ sbbq %r12, %r13
+ movq 88(%rsi), %r12
+ sbbq %r11, %r12
+ movq 104(%rsi), %rdx
+ movq 96(%rsi), %r14
+ sbbq %rbp, %r14
+ sbbq -24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ sbbq -16(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ sbbq -8(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -8(%rsp) ## 8-byte Spill
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%r15), %r11
+ cmoveq %r8, %r11
+ testb %bpl, %bpl
+ movq 16(%r15), %rbp
+ cmoveq %r8, %rbp
+ movq 8(%r15), %rsi
+ cmoveq %r8, %rsi
+ movq 56(%r15), %r10
+ cmoveq %r8, %r10
+ movq 48(%r15), %rdx
+ cmoveq %r8, %rdx
+ movq 40(%r15), %rcx
+ cmoveq %r8, %rcx
+ movq 32(%r15), %rax
+ cmoveq %r8, %rax
+ cmovneq 24(%r15), %r8
+ addq %rbx, %r11
+ adcq %r9, %rsi
+ movq %r11, 64(%rdi)
+ adcq %r13, %rbp
+ movq %rsi, 72(%rdi)
+ movq %rbp, 80(%rdi)
+ adcq %r12, %r8
+ movq %r8, 88(%rdi)
+ adcq %r14, %rax
+ movq %rax, 96(%rdi)
+ adcq -24(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 104(%rdi)
+ adcq -16(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 112(%rdi)
+ adcq -8(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 120(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .p2align 4, 0x90
+l_mulPv576x64: ## @mulPv576x64
+## BB#0:
+ mulxq (%rsi), %rcx, %rax
+ movq %rcx, (%rdi)
+ mulxq 8(%rsi), %rcx, %r8
+ addq %rax, %rcx
+ movq %rcx, 8(%rdi)
+ mulxq 16(%rsi), %rcx, %r9
+ adcq %r8, %rcx
+ movq %rcx, 16(%rdi)
+ mulxq 24(%rsi), %rax, %rcx
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ mulxq 32(%rsi), %rax, %r8
+ adcq %rcx, %rax
+ movq %rax, 32(%rdi)
+ mulxq 40(%rsi), %rcx, %r9
+ adcq %r8, %rcx
+ movq %rcx, 40(%rdi)
+ mulxq 48(%rsi), %rax, %rcx
+ adcq %r9, %rax
+ movq %rax, 48(%rdi)
+ mulxq 56(%rsi), %rax, %r8
+ adcq %rcx, %rax
+ movq %rax, 56(%rdi)
+ mulxq 64(%rsi), %rax, %rcx
+ adcq %r8, %rax
+ movq %rax, 64(%rdi)
+ adcq $0, %rcx
+ movq %rcx, 72(%rdi)
+ movq %rdi, %rax
+ retq
+
+ .globl _mcl_fp_mulUnitPre9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre9Lbmi2: ## @mcl_fp_mulUnitPre9Lbmi2
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ subq $88, %rsp
+ movq %rdi, %rbx
+ leaq 8(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 80(%rsp), %r8
+ movq 72(%rsp), %r9
+ movq 64(%rsp), %r10
+ movq 56(%rsp), %r11
+ movq 48(%rsp), %r14
+ movq 40(%rsp), %rax
+ movq 32(%rsp), %rcx
+ movq 24(%rsp), %rdx
+ movq 8(%rsp), %rsi
+ movq 16(%rsp), %rdi
+ movq %rsi, (%rbx)
+ movq %rdi, 8(%rbx)
+ movq %rdx, 16(%rbx)
+ movq %rcx, 24(%rbx)
+ movq %rax, 32(%rbx)
+ movq %r14, 40(%rbx)
+ movq %r11, 48(%rbx)
+ movq %r10, 56(%rbx)
+ movq %r9, 64(%rbx)
+ movq %r8, 72(%rbx)
+ addq $88, %rsp
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fpDbl_mulPre9Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre9Lbmi2: ## @mcl_fpDbl_mulPre9Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $808, %rsp ## imm = 0x328
+ movq %rdx, %rax
+ movq %rdi, %r12
+ movq (%rax), %rdx
+ movq %rax, %rbx
+ movq %rbx, 80(%rsp) ## 8-byte Spill
+ leaq 728(%rsp), %rdi
+ movq %rsi, %rbp
+ movq %rbp, 72(%rsp) ## 8-byte Spill
+ callq l_mulPv576x64
+ movq 800(%rsp), %r13
+ movq 792(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 784(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 776(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 768(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 760(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 752(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 744(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 728(%rsp), %rax
+ movq 736(%rsp), %r14
+ movq %rax, (%r12)
+ movq %r12, 64(%rsp) ## 8-byte Spill
+ movq 8(%rbx), %rdx
+ leaq 648(%rsp), %rdi
+ movq %rbp, %rsi
+ callq l_mulPv576x64
+ movq 720(%rsp), %r8
+ movq 712(%rsp), %rcx
+ movq 704(%rsp), %rdx
+ movq 696(%rsp), %rsi
+ movq 688(%rsp), %rdi
+ movq 680(%rsp), %rbp
+ addq 648(%rsp), %r14
+ movq 672(%rsp), %rax
+ movq 656(%rsp), %rbx
+ movq 664(%rsp), %r15
+ movq %r14, 8(%r12)
+ adcq 24(%rsp), %rbx ## 8-byte Folded Reload
+ adcq 32(%rsp), %r15 ## 8-byte Folded Reload
+ adcq 40(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, %r14
+ adcq (%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 32(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 40(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, (%rsp) ## 8-byte Spill
+ adcq %r13, %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 16(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %r13 ## 8-byte Reload
+ movq 16(%r13), %rdx
+ leaq 568(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 640(%rsp), %r8
+ movq 632(%rsp), %r9
+ movq 624(%rsp), %r10
+ movq 616(%rsp), %rdi
+ movq 608(%rsp), %rbp
+ movq 600(%rsp), %rcx
+ addq 568(%rsp), %rbx
+ movq 592(%rsp), %rdx
+ movq 576(%rsp), %r12
+ movq 584(%rsp), %rsi
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq %rbx, 16(%rax)
+ adcq %r15, %r12
+ adcq %r14, %rsi
+ movq %rsi, 48(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 40(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 8(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 16(%rsp) ## 8-byte Spill
+ movq 24(%r13), %rdx
+ leaq 488(%rsp), %rdi
+ movq 72(%rsp), %r15 ## 8-byte Reload
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 560(%rsp), %r8
+ movq 552(%rsp), %rcx
+ movq 544(%rsp), %rdx
+ movq 536(%rsp), %rsi
+ movq 528(%rsp), %rdi
+ movq 520(%rsp), %rbp
+ addq 488(%rsp), %r12
+ movq 512(%rsp), %rax
+ movq 496(%rsp), %rbx
+ movq 504(%rsp), %r13
+ movq 64(%rsp), %r14 ## 8-byte Reload
+ movq %r12, 24(%r14)
+ adcq 48(%rsp), %rbx ## 8-byte Folded Reload
+ adcq 56(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 24(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 40(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, (%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 48(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %r12 ## 8-byte Reload
+ movq 32(%r12), %rdx
+ leaq 408(%rsp), %rdi
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 480(%rsp), %r8
+ movq 472(%rsp), %r9
+ movq 464(%rsp), %rdx
+ movq 456(%rsp), %rsi
+ movq 448(%rsp), %rdi
+ movq 440(%rsp), %rbp
+ addq 408(%rsp), %rbx
+ movq 432(%rsp), %rax
+ movq 416(%rsp), %r15
+ movq 424(%rsp), %rcx
+ movq %rbx, 32(%r14)
+ adcq %r13, %r15
+ adcq 24(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 40(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 48(%rsp) ## 8-byte Spill
+ movq %r12, %r14
+ movq 40(%r14), %rdx
+ leaq 328(%rsp), %rdi
+ movq 72(%rsp), %r13 ## 8-byte Reload
+ movq %r13, %rsi
+ callq l_mulPv576x64
+ movq 400(%rsp), %r8
+ movq 392(%rsp), %r9
+ movq 384(%rsp), %rsi
+ movq 376(%rsp), %rdi
+ movq 368(%rsp), %rbx
+ movq 360(%rsp), %rbp
+ addq 328(%rsp), %r15
+ movq 352(%rsp), %rcx
+ movq 336(%rsp), %r12
+ movq 344(%rsp), %rdx
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq %r15, 40(%rax)
+ adcq 56(%rsp), %r12 ## 8-byte Folded Reload
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 40(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 48(%rsp) ## 8-byte Spill
+ movq 48(%r14), %rdx
+ leaq 248(%rsp), %rdi
+ movq %r13, %rsi
+ movq %r13, %r15
+ callq l_mulPv576x64
+ movq 320(%rsp), %r8
+ movq 312(%rsp), %r9
+ movq 304(%rsp), %rsi
+ movq 296(%rsp), %rdi
+ movq 288(%rsp), %rbx
+ movq 280(%rsp), %rbp
+ addq 248(%rsp), %r12
+ movq 272(%rsp), %rcx
+ movq 256(%rsp), %r13
+ movq 264(%rsp), %rdx
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq %r12, 48(%rax)
+ adcq 56(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 40(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 48(%rsp) ## 8-byte Spill
+ movq 56(%r14), %rdx
+ leaq 168(%rsp), %rdi
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 240(%rsp), %rcx
+ movq 232(%rsp), %rdx
+ movq 224(%rsp), %rsi
+ movq 216(%rsp), %rdi
+ movq 208(%rsp), %rbx
+ addq 168(%rsp), %r13
+ movq 200(%rsp), %r12
+ movq 192(%rsp), %rbp
+ movq 176(%rsp), %r14
+ movq 184(%rsp), %r15
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq %r13, 56(%rax)
+ adcq 56(%rsp), %r14 ## 8-byte Folded Reload
+ adcq 24(%rsp), %r15 ## 8-byte Folded Reload
+ adcq 32(%rsp), %rbp ## 8-byte Folded Reload
+ adcq 40(%rsp), %r12 ## 8-byte Folded Reload
+ adcq (%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %r13
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 88(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 88(%rsp), %r14
+ adcq 96(%rsp), %r15
+ movq 160(%rsp), %r8
+ adcq 104(%rsp), %rbp
+ movq 152(%rsp), %r9
+ movq 144(%rsp), %rdx
+ movq 136(%rsp), %rsi
+ movq 128(%rsp), %rdi
+ movq 120(%rsp), %rbx
+ movq 112(%rsp), %rax
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ movq %r14, 64(%rcx)
+ movq %r15, 72(%rcx)
+ adcq %r12, %rax
+ movq %rbp, 80(%rcx)
+ movq %rax, 88(%rcx)
+ adcq %r13, %rbx
+ movq %rbx, 96(%rcx)
+ adcq (%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 104(%rcx)
+ adcq 8(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 112(%rcx)
+ adcq 16(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 120(%rcx)
+ adcq 48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 128(%rcx)
+ adcq $0, %r8
+ movq %r8, 136(%rcx)
+ addq $808, %rsp ## imm = 0x328
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sqrPre9Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre9Lbmi2: ## @mcl_fpDbl_sqrPre9Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $808, %rsp ## imm = 0x328
+ movq %rsi, %r15
+ movq %rdi, %r14
+ movq (%r15), %rdx
+ leaq 728(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 800(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 792(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 784(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 776(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 768(%rsp), %rax
+ movq %rax, 56(%rsp) ## 8-byte Spill
+ movq 760(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 752(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 744(%rsp), %rax
+ movq %rax, 80(%rsp) ## 8-byte Spill
+ movq 728(%rsp), %rax
+ movq 736(%rsp), %r12
+ movq %rax, (%r14)
+ movq %r14, 72(%rsp) ## 8-byte Spill
+ movq 8(%r15), %rdx
+ leaq 648(%rsp), %rdi
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 720(%rsp), %r8
+ movq 712(%rsp), %rcx
+ movq 704(%rsp), %rdx
+ movq 696(%rsp), %rsi
+ movq 688(%rsp), %rdi
+ movq 680(%rsp), %rbp
+ addq 648(%rsp), %r12
+ movq 672(%rsp), %rax
+ movq 656(%rsp), %rbx
+ movq 664(%rsp), %r13
+ movq %r12, 8(%r14)
+ adcq 80(%rsp), %rbx ## 8-byte Folded Reload
+ adcq 40(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq %r15, 64(%rsp) ## 8-byte Spill
+ movq 16(%r15), %rdx
+ leaq 568(%rsp), %rdi
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 640(%rsp), %r8
+ movq 632(%rsp), %rcx
+ movq 624(%rsp), %rdx
+ movq 616(%rsp), %rsi
+ movq 608(%rsp), %rdi
+ movq 600(%rsp), %rbp
+ addq 568(%rsp), %rbx
+ movq 592(%rsp), %rax
+ movq 576(%rsp), %r14
+ movq 584(%rsp), %r12
+ movq 72(%rsp), %r15 ## 8-byte Reload
+ movq %rbx, 16(%r15)
+ adcq %r13, %r14
+ adcq 40(%rsp), %r12 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 24(%rsi), %rdx
+ leaq 488(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 560(%rsp), %r8
+ movq 552(%rsp), %rcx
+ movq 544(%rsp), %rdx
+ movq 536(%rsp), %rsi
+ movq 528(%rsp), %rdi
+ movq 520(%rsp), %rbp
+ addq 488(%rsp), %r14
+ movq 512(%rsp), %rax
+ movq 496(%rsp), %rbx
+ movq 504(%rsp), %r13
+ movq %r14, 24(%r15)
+ adcq %r12, %rbx
+ adcq 40(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 32(%rsi), %rdx
+ leaq 408(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 480(%rsp), %r8
+ movq 472(%rsp), %rcx
+ movq 464(%rsp), %rdx
+ movq 456(%rsp), %rsi
+ movq 448(%rsp), %rdi
+ movq 440(%rsp), %rbp
+ addq 408(%rsp), %rbx
+ movq 432(%rsp), %rax
+ movq 416(%rsp), %r14
+ movq 424(%rsp), %r12
+ movq %rbx, 32(%r15)
+ adcq %r13, %r14
+ adcq 40(%rsp), %r12 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 40(%rsi), %rdx
+ leaq 328(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 400(%rsp), %r8
+ movq 392(%rsp), %rcx
+ movq 384(%rsp), %rdx
+ movq 376(%rsp), %rsi
+ movq 368(%rsp), %rdi
+ movq 360(%rsp), %rbp
+ addq 328(%rsp), %r14
+ movq 352(%rsp), %rax
+ movq 336(%rsp), %rbx
+ movq 344(%rsp), %r13
+ movq %r14, 40(%r15)
+ adcq %r12, %rbx
+ adcq 40(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 48(%rsi), %rdx
+ leaq 248(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 320(%rsp), %r8
+ movq 312(%rsp), %rcx
+ movq 304(%rsp), %rdx
+ movq 296(%rsp), %rsi
+ movq 288(%rsp), %rdi
+ movq 280(%rsp), %rbp
+ addq 248(%rsp), %rbx
+ movq 272(%rsp), %rax
+ movq 256(%rsp), %r12
+ movq 264(%rsp), %r14
+ movq %rbx, 48(%r15)
+ adcq %r13, %r12
+ adcq 40(%rsp), %r14 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 56(%rsi), %rdx
+ leaq 168(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 240(%rsp), %r8
+ movq 232(%rsp), %rdx
+ movq 224(%rsp), %rsi
+ movq 216(%rsp), %rdi
+ movq 208(%rsp), %rbx
+ movq 200(%rsp), %rcx
+ addq 168(%rsp), %r12
+ movq 192(%rsp), %r15
+ movq 176(%rsp), %r13
+ movq 184(%rsp), %rbp
+ movq 72(%rsp), %rax ## 8-byte Reload
+ movq %r12, 56(%rax)
+ adcq %r14, %r13
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ adcq 48(%rsp), %r15 ## 8-byte Folded Reload
+ adcq 56(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %r12
+ adcq 8(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %r14
+ adcq 16(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 64(%rsi), %rdx
+ leaq 88(%rsp), %rdi
+ callq l_mulPv576x64
+ addq 88(%rsp), %r13
+ adcq 96(%rsp), %rbp
+ movq 160(%rsp), %r8
+ adcq 104(%rsp), %r15
+ movq 152(%rsp), %r9
+ movq 144(%rsp), %rdx
+ movq 136(%rsp), %rsi
+ movq 128(%rsp), %rdi
+ movq 120(%rsp), %rbx
+ movq 112(%rsp), %rax
+ movq 72(%rsp), %rcx ## 8-byte Reload
+ movq %r13, 64(%rcx)
+ movq %rbp, 72(%rcx)
+ adcq %r12, %rax
+ movq %r15, 80(%rcx)
+ movq %rax, 88(%rcx)
+ adcq %r14, %rbx
+ movq %rbx, 96(%rcx)
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 104(%rcx)
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 112(%rcx)
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 120(%rcx)
+ adcq 32(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 128(%rcx)
+ adcq $0, %r8
+ movq %r8, 136(%rcx)
+ addq $808, %rsp ## imm = 0x328
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mont9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_mont9Lbmi2: ## @mcl_fp_mont9Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1560, %rsp ## imm = 0x618
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ movq %rdx, 96(%rsp) ## 8-byte Spill
+ movq %rsi, 88(%rsp) ## 8-byte Spill
+ movq %rdi, 112(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 80(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1480(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 1480(%rsp), %r14
+ movq 1488(%rsp), %r15
+ movq %r14, %rdx
+ imulq %rbx, %rdx
+ movq 1552(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 1544(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 1536(%rsp), %rax
+ movq %rax, 56(%rsp) ## 8-byte Spill
+ movq 1528(%rsp), %r12
+ movq 1520(%rsp), %r13
+ movq 1512(%rsp), %rbx
+ movq 1504(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 1496(%rsp), %rbp
+ leaq 1400(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 1400(%rsp), %r14
+ adcq 1408(%rsp), %r15
+ adcq 1416(%rsp), %rbp
+ movq %rbp, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 1424(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 1432(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 1440(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ adcq 1448(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %rbx ## 8-byte Reload
+ adcq 1456(%rsp), %rbx
+ movq 40(%rsp), %r14 ## 8-byte Reload
+ adcq 1464(%rsp), %r14
+ movq 24(%rsp), %r13 ## 8-byte Reload
+ adcq 1472(%rsp), %r13
+ sbbq %rbp, %rbp
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1320(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebp
+ addq 1320(%rsp), %r15
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 1328(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 1336(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r12 ## 8-byte Reload
+ adcq 1344(%rsp), %r12
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 1352(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rax ## 8-byte Reload
+ adcq 1360(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ adcq 1368(%rsp), %rbx
+ adcq 1376(%rsp), %r14
+ movq %r14, 40(%rsp) ## 8-byte Spill
+ adcq 1384(%rsp), %r13
+ movq %r13, 24(%rsp) ## 8-byte Spill
+ adcq 1392(%rsp), %rbp
+ sbbq %r14, %r14
+ movq %r15, %rdx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 1240(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq %r14, %rax
+ andl $1, %eax
+ addq 1240(%rsp), %r15
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 1248(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 1256(%rsp), %r14
+ adcq 1264(%rsp), %r12
+ movq %r12, 32(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 1272(%rsp), %r12
+ movq 48(%rsp), %r13 ## 8-byte Reload
+ adcq 1280(%rsp), %r13
+ adcq 1288(%rsp), %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %r15 ## 8-byte Reload
+ adcq 1296(%rsp), %r15
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 1304(%rsp), %rbx
+ adcq 1312(%rsp), %rbp
+ adcq $0, %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 1160(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 8(%rsp), %rax ## 8-byte Reload
+ addq 1160(%rsp), %rax
+ adcq 1168(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r14 ## 8-byte Reload
+ adcq 1176(%rsp), %r14
+ adcq 1184(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ movq %r13, %r12
+ adcq 1192(%rsp), %r12
+ movq 56(%rsp), %rcx ## 8-byte Reload
+ adcq 1200(%rsp), %rcx
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ adcq 1208(%rsp), %r15
+ movq %r15, %r13
+ adcq 1216(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ adcq 1224(%rsp), %rbp
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 1232(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 1080(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq %r15, %rax
+ andl $1, %eax
+ addq 1080(%rsp), %rbx
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 1088(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq %r14, %r15
+ adcq 1096(%rsp), %r15
+ movq 16(%rsp), %r14 ## 8-byte Reload
+ adcq 1104(%rsp), %r14
+ movq %r12, %rbx
+ adcq 1112(%rsp), %rbx
+ movq 56(%rsp), %rcx ## 8-byte Reload
+ adcq 1120(%rsp), %rcx
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ adcq 1128(%rsp), %r13
+ movq %r13, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r13 ## 8-byte Reload
+ adcq 1136(%rsp), %r13
+ adcq 1144(%rsp), %rbp
+ movq 64(%rsp), %r12 ## 8-byte Reload
+ adcq 1152(%rsp), %r12
+ adcq $0, %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 1000(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq (%rsp), %rax ## 8-byte Reload
+ addq 1000(%rsp), %rax
+ adcq 1008(%rsp), %r15
+ movq %r15, 32(%rsp) ## 8-byte Spill
+ adcq 1016(%rsp), %r14
+ movq %r14, %r15
+ adcq 1024(%rsp), %rbx
+ movq %rbx, 48(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %r14 ## 8-byte Reload
+ adcq 1032(%rsp), %r14
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 1040(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ adcq 1048(%rsp), %r13
+ movq %r13, 24(%rsp) ## 8-byte Spill
+ adcq 1056(%rsp), %rbp
+ adcq 1064(%rsp), %r12
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 1072(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 920(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 920(%rsp), %r13
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 928(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ adcq 936(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r15 ## 8-byte Reload
+ adcq 944(%rsp), %r15
+ movq %r14, %r13
+ adcq 952(%rsp), %r13
+ movq 40(%rsp), %r14 ## 8-byte Reload
+ adcq 960(%rsp), %r14
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 968(%rsp), %rbx
+ adcq 976(%rsp), %rbp
+ adcq 984(%rsp), %r12
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 992(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 840(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 32(%rsp), %rax ## 8-byte Reload
+ addq 840(%rsp), %rax
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 848(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq 856(%rsp), %r15
+ adcq 864(%rsp), %r13
+ movq %r13, 56(%rsp) ## 8-byte Spill
+ adcq 872(%rsp), %r14
+ movq %r14, 40(%rsp) ## 8-byte Spill
+ adcq 880(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ adcq 888(%rsp), %rbp
+ adcq 896(%rsp), %r12
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq 904(%rsp), %r13
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 912(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 760(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 760(%rsp), %r14
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 768(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq 776(%rsp), %r15
+ movq 56(%rsp), %r14 ## 8-byte Reload
+ adcq 784(%rsp), %r14
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 792(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 800(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 808(%rsp), %rbp
+ movq %r12, %rbx
+ adcq 816(%rsp), %rbx
+ movq %r13, %r12
+ adcq 824(%rsp), %r12
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 832(%rsp), %r13
+ adcq $0, %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 680(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 16(%rsp), %rax ## 8-byte Reload
+ addq 680(%rsp), %rax
+ adcq 688(%rsp), %r15
+ movq %r15, 48(%rsp) ## 8-byte Spill
+ adcq 696(%rsp), %r14
+ movq %r14, 56(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 704(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r15 ## 8-byte Reload
+ adcq 712(%rsp), %r15
+ adcq 720(%rsp), %rbp
+ adcq 728(%rsp), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ adcq 736(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ adcq 744(%rsp), %r13
+ movq %r13, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r13 ## 8-byte Reload
+ adcq 752(%rsp), %r13
+ sbbq %r14, %r14
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 600(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %r14d
+ addq 600(%rsp), %rbx
+ movq 48(%rsp), %rax ## 8-byte Reload
+ adcq 608(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %rax ## 8-byte Reload
+ adcq 616(%rsp), %rax
+ movq %rax, 56(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rbx ## 8-byte Reload
+ adcq 624(%rsp), %rbx
+ adcq 632(%rsp), %r15
+ movq %r15, 24(%rsp) ## 8-byte Spill
+ adcq 640(%rsp), %rbp
+ movq 64(%rsp), %r12 ## 8-byte Reload
+ adcq 648(%rsp), %r12
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 656(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r15 ## 8-byte Reload
+ adcq 664(%rsp), %r15
+ adcq 672(%rsp), %r13
+ adcq $0, %r14
+ movq %r14, 16(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 520(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 48(%rsp), %rax ## 8-byte Reload
+ addq 520(%rsp), %rax
+ movq 56(%rsp), %r14 ## 8-byte Reload
+ adcq 528(%rsp), %r14
+ adcq 536(%rsp), %rbx
+ movq %rbx, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 544(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 552(%rsp), %rbp
+ adcq 560(%rsp), %r12
+ movq %r12, 64(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r12 ## 8-byte Reload
+ adcq 568(%rsp), %r12
+ adcq 576(%rsp), %r15
+ movq %r15, (%rsp) ## 8-byte Spill
+ adcq 584(%rsp), %r13
+ movq %r13, 32(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r15 ## 8-byte Reload
+ adcq 592(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 440(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 440(%rsp), %r13
+ adcq 448(%rsp), %r14
+ movq %r14, 56(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %r14 ## 8-byte Reload
+ adcq 456(%rsp), %r14
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 464(%rsp), %rbx
+ adcq 472(%rsp), %rbp
+ movq %rbp, 104(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ adcq 488(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 496(%rsp), %rbp
+ movq 32(%rsp), %r12 ## 8-byte Reload
+ adcq 504(%rsp), %r12
+ adcq 512(%rsp), %r15
+ movq %r15, %r13
+ adcq $0, %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 360(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 56(%rsp), %rax ## 8-byte Reload
+ addq 360(%rsp), %rax
+ adcq 368(%rsp), %r14
+ adcq 376(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ movq 104(%rsp), %rcx ## 8-byte Reload
+ adcq 384(%rsp), %rcx
+ movq %rcx, 104(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rbx ## 8-byte Reload
+ adcq 392(%rsp), %rbx
+ movq 8(%rsp), %r15 ## 8-byte Reload
+ adcq 400(%rsp), %r15
+ adcq 408(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 416(%rsp), %r12
+ movq %r12, %rbp
+ adcq 424(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 280(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %r13d
+ addq 280(%rsp), %r12
+ adcq 288(%rsp), %r14
+ movq %r14, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 296(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 104(%rsp), %r14 ## 8-byte Reload
+ adcq 304(%rsp), %r14
+ adcq 312(%rsp), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ adcq 320(%rsp), %r15
+ movq %r15, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %rbx ## 8-byte Reload
+ adcq 328(%rsp), %rbx
+ adcq 336(%rsp), %rbp
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 344(%rsp), %r12
+ movq 48(%rsp), %rbp ## 8-byte Reload
+ adcq 352(%rsp), %rbp
+ adcq $0, %r13
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 200(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 40(%rsp), %rax ## 8-byte Reload
+ addq 200(%rsp), %rax
+ movq 24(%rsp), %r15 ## 8-byte Reload
+ adcq 208(%rsp), %r15
+ adcq 216(%rsp), %r14
+ movq %r14, 104(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %r14 ## 8-byte Reload
+ adcq 224(%rsp), %r14
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 232(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ adcq 240(%rsp), %rbx
+ movq %rbx, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 248(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ adcq 256(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq 264(%rsp), %rbp
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 272(%rsp), %r13
+ sbbq %rbx, %rbx
+ movq 80(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %r12
+ leaq 120(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebx
+ addq 120(%rsp), %r12
+ adcq 128(%rsp), %r15
+ movq 104(%rsp), %rbp ## 8-byte Reload
+ adcq 136(%rsp), %rbp
+ movq %r14, %rcx
+ adcq 144(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r8 ## 8-byte Reload
+ adcq 152(%rsp), %r8
+ movq %r8, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r9 ## 8-byte Reload
+ adcq 160(%rsp), %r9
+ movq %r9, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r10 ## 8-byte Reload
+ adcq 168(%rsp), %r10
+ movq %r10, 32(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rdi ## 8-byte Reload
+ adcq 176(%rsp), %rdi
+ movq %rdi, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r14 ## 8-byte Reload
+ adcq 184(%rsp), %r14
+ adcq 192(%rsp), %r13
+ adcq $0, %rbx
+ movq %r15, %rsi
+ movq %r15, %r12
+ movq 72(%rsp), %rdx ## 8-byte Reload
+ subq (%rdx), %rsi
+ movq %rbp, %rax
+ movq %rbp, %r15
+ sbbq 8(%rdx), %rax
+ movq %rcx, %rbp
+ sbbq 16(%rdx), %rbp
+ movq %r8, %rcx
+ sbbq 24(%rdx), %rcx
+ movq %r9, %r8
+ sbbq 32(%rdx), %r8
+ movq %r10, %r11
+ sbbq 40(%rdx), %r11
+ movq %rdi, %r10
+ sbbq 48(%rdx), %r10
+ movq %r14, %rdi
+ sbbq 56(%rdx), %rdi
+ movq %r13, %r9
+ sbbq 64(%rdx), %r9
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %r13, %r9
+ testb %bl, %bl
+ cmovneq %r12, %rsi
+ movq 112(%rsp), %rbx ## 8-byte Reload
+ movq %rsi, (%rbx)
+ cmovneq %r15, %rax
+ movq %rax, 8(%rbx)
+ cmovneq 64(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 16(%rbx)
+ cmovneq 8(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rbx)
+ cmovneq (%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 32(%rbx)
+ cmovneq 32(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 40(%rbx)
+ cmovneq 16(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 48(%rbx)
+ cmovneq %r14, %rdi
+ movq %rdi, 56(%rbx)
+ movq %r9, 64(%rbx)
+ addq $1560, %rsp ## imm = 0x618
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montNF9Lbmi2: ## @mcl_fp_montNF9Lbmi2
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1560, %rsp ## imm = 0x618
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ movq %rdx, 80(%rsp) ## 8-byte Spill
+ movq %rsi, 88(%rsp) ## 8-byte Spill
+ movq %rdi, 112(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 96(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1480(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 1480(%rsp), %r12
+ movq 1488(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq %r12, %rdx
+ imulq %rbx, %rdx
+ movq 1552(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 1544(%rsp), %r13
+ movq 1536(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 1528(%rsp), %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ movq 1520(%rsp), %r14
+ movq 1512(%rsp), %r15
+ movq 1504(%rsp), %rbx
+ movq 1496(%rsp), %rbp
+ leaq 1400(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 1400(%rsp), %r12
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 1408(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ adcq 1416(%rsp), %rbp
+ movq %rbp, 104(%rsp) ## 8-byte Spill
+ adcq 1424(%rsp), %rbx
+ movq %rbx, (%rsp) ## 8-byte Spill
+ adcq 1432(%rsp), %r15
+ movq %r15, 8(%rsp) ## 8-byte Spill
+ adcq 1440(%rsp), %r14
+ movq %r14, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rbx ## 8-byte Reload
+ adcq 1448(%rsp), %rbx
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 1456(%rsp), %r12
+ adcq 1464(%rsp), %r13
+ movq %r13, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 1472(%rsp), %rbp
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1320(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 1392(%rsp), %rax
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ addq 1320(%rsp), %rcx
+ movq 104(%rsp), %r15 ## 8-byte Reload
+ adcq 1328(%rsp), %r15
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 1336(%rsp), %r14
+ movq 8(%rsp), %rdx ## 8-byte Reload
+ adcq 1344(%rsp), %rdx
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r13 ## 8-byte Reload
+ adcq 1352(%rsp), %r13
+ adcq 1360(%rsp), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ adcq 1368(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rdx ## 8-byte Reload
+ adcq 1376(%rsp), %rdx
+ movq %rdx, 40(%rsp) ## 8-byte Spill
+ adcq 1384(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, %rbp
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 1240(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 1240(%rsp), %rbx
+ adcq 1248(%rsp), %r15
+ movq %r15, 104(%rsp) ## 8-byte Spill
+ adcq 1256(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r12 ## 8-byte Reload
+ adcq 1264(%rsp), %r12
+ adcq 1272(%rsp), %r13
+ movq %r13, %r14
+ movq 64(%rsp), %r13 ## 8-byte Reload
+ adcq 1280(%rsp), %r13
+ movq 48(%rsp), %rbx ## 8-byte Reload
+ adcq 1288(%rsp), %rbx
+ movq 40(%rsp), %r15 ## 8-byte Reload
+ adcq 1296(%rsp), %r15
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 1304(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 1312(%rsp), %rbp
+ movq %rbp, 56(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 1160(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 1232(%rsp), %rax
+ movq 104(%rsp), %rcx ## 8-byte Reload
+ addq 1160(%rsp), %rcx
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 1168(%rsp), %rbp
+ adcq 1176(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ adcq 1184(%rsp), %r14
+ adcq 1192(%rsp), %r13
+ movq %r13, %r12
+ adcq 1200(%rsp), %rbx
+ movq %rbx, 48(%rsp) ## 8-byte Spill
+ adcq 1208(%rsp), %r15
+ movq %r15, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 1216(%rsp), %rbx
+ movq 56(%rsp), %rdx ## 8-byte Reload
+ adcq 1224(%rsp), %rdx
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ adcq $0, %r15
+ movq %rcx, %rdx
+ movq %rcx, %r13
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 1080(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 1080(%rsp), %r13
+ adcq 1088(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq 1096(%rsp), %r13
+ adcq 1104(%rsp), %r14
+ adcq 1112(%rsp), %r12
+ movq %r12, 64(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 1120(%rsp), %r12
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 1128(%rsp), %rbp
+ adcq 1136(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %rbx ## 8-byte Reload
+ adcq 1144(%rsp), %rbx
+ adcq 1152(%rsp), %r15
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 1000(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 1072(%rsp), %rax
+ movq (%rsp), %rcx ## 8-byte Reload
+ addq 1000(%rsp), %rcx
+ adcq 1008(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ adcq 1016(%rsp), %r14
+ movq %r14, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %r14 ## 8-byte Reload
+ adcq 1024(%rsp), %r14
+ adcq 1032(%rsp), %r12
+ adcq 1040(%rsp), %rbp
+ movq %rbp, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r13 ## 8-byte Reload
+ adcq 1048(%rsp), %r13
+ adcq 1056(%rsp), %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ adcq 1064(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 920(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 920(%rsp), %rbx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 928(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rbp ## 8-byte Reload
+ adcq 936(%rsp), %rbp
+ movq %r14, %rbx
+ adcq 944(%rsp), %rbx
+ adcq 952(%rsp), %r12
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 960(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 968(%rsp), %r13
+ movq %r13, %r15
+ movq 56(%rsp), %r13 ## 8-byte Reload
+ adcq 976(%rsp), %r13
+ movq 16(%rsp), %r14 ## 8-byte Reload
+ adcq 984(%rsp), %r14
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 992(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 840(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 912(%rsp), %rax
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ addq 840(%rsp), %rcx
+ adcq 848(%rsp), %rbp
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq 856(%rsp), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ adcq 864(%rsp), %r12
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 872(%rsp), %rbp
+ adcq 880(%rsp), %r15
+ movq %r15, 24(%rsp) ## 8-byte Spill
+ adcq 888(%rsp), %r13
+ adcq 896(%rsp), %r14
+ movq %r14, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %rdx ## 8-byte Reload
+ adcq 904(%rsp), %rdx
+ movq %rdx, (%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, %r14
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 760(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 760(%rsp), %rbx
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 768(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %r15 ## 8-byte Reload
+ adcq 776(%rsp), %r15
+ adcq 784(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ movq %rbp, %rbx
+ adcq 792(%rsp), %rbx
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 800(%rsp), %rbp
+ adcq 808(%rsp), %r13
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 816(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %r12 ## 8-byte Reload
+ adcq 824(%rsp), %r12
+ adcq 832(%rsp), %r14
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 680(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 752(%rsp), %rcx
+ movq 32(%rsp), %rax ## 8-byte Reload
+ addq 680(%rsp), %rax
+ adcq 688(%rsp), %r15
+ movq %r15, 64(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rdx ## 8-byte Reload
+ adcq 696(%rsp), %rdx
+ movq %rdx, 48(%rsp) ## 8-byte Spill
+ adcq 704(%rsp), %rbx
+ movq %rbx, 40(%rsp) ## 8-byte Spill
+ adcq 712(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ adcq 720(%rsp), %r13
+ movq %r13, %r15
+ movq 16(%rsp), %rbx ## 8-byte Reload
+ adcq 728(%rsp), %rbx
+ adcq 736(%rsp), %r12
+ movq %r12, (%rsp) ## 8-byte Spill
+ adcq 744(%rsp), %r14
+ movq %r14, 32(%rsp) ## 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 600(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 600(%rsp), %r13
+ movq 64(%rsp), %r13 ## 8-byte Reload
+ adcq 608(%rsp), %r13
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 616(%rsp), %r12
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 624(%rsp), %rbp
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 632(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 640(%rsp), %r15
+ movq %r15, 56(%rsp) ## 8-byte Spill
+ adcq 648(%rsp), %rbx
+ movq %rbx, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 656(%rsp), %r14
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 664(%rsp), %rbx
+ movq 8(%rsp), %r15 ## 8-byte Reload
+ adcq 672(%rsp), %r15
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 520(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 592(%rsp), %rcx
+ movq %r13, %rax
+ addq 520(%rsp), %rax
+ adcq 528(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ movq %rbp, %r12
+ adcq 536(%rsp), %r12
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 544(%rsp), %rbp
+ movq 56(%rsp), %rdx ## 8-byte Reload
+ adcq 552(%rsp), %rdx
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rdx ## 8-byte Reload
+ adcq 560(%rsp), %rdx
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 568(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ adcq 576(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 584(%rsp), %r15
+ movq %r15, 8(%rsp) ## 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, %r13
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 440(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 440(%rsp), %r14
+ movq 48(%rsp), %rax ## 8-byte Reload
+ adcq 448(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ adcq 456(%rsp), %r12
+ adcq 464(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %r14 ## 8-byte Reload
+ adcq 472(%rsp), %r14
+ movq 16(%rsp), %r15 ## 8-byte Reload
+ adcq 480(%rsp), %r15
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 488(%rsp), %rbp
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 496(%rsp), %rbx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 504(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ adcq 512(%rsp), %r13
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 360(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 432(%rsp), %rcx
+ movq 48(%rsp), %rax ## 8-byte Reload
+ addq 360(%rsp), %rax
+ adcq 368(%rsp), %r12
+ movq %r12, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rdx ## 8-byte Reload
+ adcq 376(%rsp), %rdx
+ movq %rdx, 24(%rsp) ## 8-byte Spill
+ adcq 384(%rsp), %r14
+ movq %r14, 56(%rsp) ## 8-byte Spill
+ adcq 392(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ adcq 400(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 408(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 416(%rsp), %r14
+ adcq 424(%rsp), %r13
+ movq %r13, %r15
+ adcq $0, %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 280(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 280(%rsp), %r12
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 288(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 296(%rsp), %rbp
+ movq 56(%rsp), %rax ## 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 56(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 312(%rsp), %r13
+ movq (%rsp), %r12 ## 8-byte Reload
+ adcq 320(%rsp), %r12
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 328(%rsp), %rbx
+ adcq 336(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ adcq 344(%rsp), %r15
+ movq %r15, 64(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r14 ## 8-byte Reload
+ adcq 352(%rsp), %r14
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 200(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 272(%rsp), %rcx
+ movq 40(%rsp), %rax ## 8-byte Reload
+ addq 200(%rsp), %rax
+ adcq 208(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %rbp ## 8-byte Reload
+ adcq 216(%rsp), %rbp
+ adcq 224(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ adcq 232(%rsp), %r12
+ movq %r12, (%rsp) ## 8-byte Spill
+ adcq 240(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r15 ## 8-byte Reload
+ adcq 248(%rsp), %r15
+ movq 64(%rsp), %r12 ## 8-byte Reload
+ adcq 256(%rsp), %r12
+ adcq 264(%rsp), %r14
+ adcq $0, %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbx
+ leaq 120(%rsp), %rdi
+ movq 72(%rsp), %r13 ## 8-byte Reload
+ movq %r13, %rsi
+ callq l_mulPv576x64
+ addq 120(%rsp), %rbx
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 128(%rsp), %rcx
+ movq %rbp, %rdx
+ adcq 136(%rsp), %rdx
+ movq 16(%rsp), %rsi ## 8-byte Reload
+ adcq 144(%rsp), %rsi
+ movq %rsi, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %rdi ## 8-byte Reload
+ adcq 152(%rsp), %rdi
+ movq %rdi, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 160(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ movq %r15, %r8
+ adcq 168(%rsp), %r8
+ movq %r8, 8(%rsp) ## 8-byte Spill
+ movq %r12, %r15
+ adcq 176(%rsp), %r15
+ adcq 184(%rsp), %r14
+ movq 40(%rsp), %r9 ## 8-byte Reload
+ adcq 192(%rsp), %r9
+ movq %rcx, %rax
+ movq %rcx, %r11
+ movq %r13, %rbp
+ subq (%rbp), %rax
+ movq %rdx, %rcx
+ movq %rdx, %r12
+ sbbq 8(%rbp), %rcx
+ movq %rsi, %rdx
+ sbbq 16(%rbp), %rdx
+ movq %rdi, %rsi
+ sbbq 24(%rbp), %rsi
+ movq %rbx, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %r8, %r10
+ sbbq 40(%rbp), %r10
+ movq %r15, %r13
+ sbbq 48(%rbp), %r13
+ movq %r14, %r8
+ sbbq 56(%rbp), %r8
+ movq %rbp, %rbx
+ movq %r9, %rbp
+ sbbq 64(%rbx), %rbp
+ movq %rbp, %rbx
+ sarq $63, %rbx
+ cmovsq %r11, %rax
+ movq 112(%rsp), %rbx ## 8-byte Reload
+ movq %rax, (%rbx)
+ cmovsq %r12, %rcx
+ movq %rcx, 8(%rbx)
+ cmovsq 16(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rbx)
+ cmovsq (%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovsq 32(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 32(%rbx)
+ cmovsq 8(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 40(%rbx)
+ cmovsq %r15, %r13
+ movq %r13, 48(%rbx)
+ cmovsq %r14, %r8
+ movq %r8, 56(%rbx)
+ cmovsq %r9, %rbp
+ movq %rbp, 64(%rbx)
+ addq $1560, %rsp ## imm = 0x618
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_montRed9Lbmi2: ## @mcl_fp_montRed9Lbmi2
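+## Montgomery reduction for a 9-limb (576-bit) modulus: the 18-limb input at (%rsi)
+## is reduced one 64-bit word per round using the factor -p^-1 mod 2^64 stored at
+## -8(%rdx), with a final conditional subtraction of p before the 9-limb result is
+## written to (%rdi).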
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $936, %rsp ## imm = 0x3A8
+ movq %rdx, %rax
+ movq %rdi, 208(%rsp) ## 8-byte Spill
+ movq -8(%rax), %rcx
+ movq %rcx, 96(%rsp) ## 8-byte Spill
+ movq (%rsi), %r14
+ movq 8(%rsi), %rdx
+ movq %rdx, (%rsp) ## 8-byte Spill
+ movq %r14, %rdx
+ imulq %rcx, %rdx
+ movq 136(%rsi), %rcx
+ movq %rcx, 88(%rsp) ## 8-byte Spill
+ movq 128(%rsi), %rcx
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ movq 120(%rsi), %rcx
+ movq %rcx, 80(%rsp) ## 8-byte Spill
+ movq 112(%rsi), %rcx
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ movq 104(%rsi), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq 96(%rsi), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 88(%rsi), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 80(%rsi), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ movq 72(%rsi), %r12
+ movq 64(%rsi), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 56(%rsi), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq 48(%rsi), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 40(%rsi), %rbp
+ movq 32(%rsi), %rbx
+ movq 24(%rsi), %r13
+ movq 16(%rsi), %r15
+ movq %rax, %rcx
+ movq (%rcx), %rax
+ movq %rax, 144(%rsp) ## 8-byte Spill
+ movq 64(%rcx), %rax
+ movq %rax, 200(%rsp) ## 8-byte Spill
+ movq 56(%rcx), %rax
+ movq %rax, 192(%rsp) ## 8-byte Spill
+ movq 48(%rcx), %rax
+ movq %rax, 184(%rsp) ## 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, 176(%rsp) ## 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, 168(%rsp) ## 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, 160(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, 152(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, 136(%rsp) ## 8-byte Spill
+ movq %rcx, %rsi
+ movq %rsi, 104(%rsp) ## 8-byte Spill
+ leaq 856(%rsp), %rdi
+ callq l_mulPv576x64
+ addq 856(%rsp), %r14
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 864(%rsp), %rcx
+ adcq 872(%rsp), %r15
+ adcq 880(%rsp), %r13
+ adcq 888(%rsp), %rbx
+ movq %rbx, 120(%rsp) ## 8-byte Spill
+ adcq 896(%rsp), %rbp
+ movq %rbp, 112(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ adcq 904(%rsp), %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 912(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 920(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 928(%rsp), %r12
+ movq %r12, (%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq $0, %rbp
+ adcq $0, 8(%rsp) ## 8-byte Folded Spill
+ adcq $0, 16(%rsp) ## 8-byte Folded Spill
+ adcq $0, 48(%rsp) ## 8-byte Folded Spill
+ adcq $0, 72(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ adcq $0, 56(%rsp) ## 8-byte Folded Spill
+ movq 88(%rsp), %r14 ## 8-byte Reload
+ adcq $0, %r14
+ sbbq %r12, %r12
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 776(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %r12d
+ addq 776(%rsp), %rbx
+ adcq 784(%rsp), %r15
+ adcq 792(%rsp), %r13
+ movq %r13, 128(%rsp) ## 8-byte Spill
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 800(%rsp), %rax
+ movq %rax, 120(%rsp) ## 8-byte Spill
+ movq 112(%rsp), %rax ## 8-byte Reload
+ adcq 808(%rsp), %rax
+ movq %rax, 112(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ adcq 816(%rsp), %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 824(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 832(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 840(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 848(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq $0, %r13
+ adcq $0, 16(%rsp) ## 8-byte Folded Spill
+ adcq $0, 48(%rsp) ## 8-byte Folded Spill
+ adcq $0, 72(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ movq 56(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, %r14
+ movq %r14, 88(%rsp) ## 8-byte Spill
+ adcq $0, %r12
+ movq %r15, %rdx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 696(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 696(%rsp), %r15
+ movq 128(%rsp), %rcx ## 8-byte Reload
+ adcq 704(%rsp), %rcx
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 712(%rsp), %rax
+ movq %rax, 120(%rsp) ## 8-byte Spill
+ movq 112(%rsp), %rax ## 8-byte Reload
+ adcq 720(%rsp), %rax
+ movq %rax, 112(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rbp ## 8-byte Reload
+ adcq 728(%rsp), %rbp
+ movq 32(%rsp), %r14 ## 8-byte Reload
+ adcq 736(%rsp), %r14
+ movq 40(%rsp), %r15 ## 8-byte Reload
+ adcq 744(%rsp), %r15
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 752(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 760(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ adcq $0, 16(%rsp) ## 8-byte Folded Spill
+ movq 48(%rsp), %r13 ## 8-byte Reload
+ adcq $0, %r13
+ adcq $0, 72(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ adcq $0, 88(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rcx, %rbx
+ movq %rbx, %rdx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 616(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 616(%rsp), %rbx
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 624(%rsp), %rax
+ movq 112(%rsp), %rcx ## 8-byte Reload
+ adcq 632(%rsp), %rcx
+ movq %rcx, 112(%rsp) ## 8-byte Spill
+ adcq 640(%rsp), %rbp
+ movq %rbp, 64(%rsp) ## 8-byte Spill
+ adcq 648(%rsp), %r14
+ movq %r14, 32(%rsp) ## 8-byte Spill
+ adcq 656(%rsp), %r15
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 664(%rsp), %r14
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 672(%rsp), %rbp
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 680(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 688(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r13
+ movq %r13, 48(%rsp) ## 8-byte Spill
+ adcq $0, 72(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ adcq $0, 56(%rsp) ## 8-byte Folded Spill
+ adcq $0, 88(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 536(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 536(%rsp), %rbx
+ movq 112(%rsp), %rax ## 8-byte Reload
+ adcq 544(%rsp), %rax
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 552(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 560(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ adcq 568(%rsp), %r15
+ movq %r15, 40(%rsp) ## 8-byte Spill
+ adcq 576(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ adcq 584(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq 592(%rsp), %r13
+ movq 16(%rsp), %r15 ## 8-byte Reload
+ adcq 600(%rsp), %r15
+ movq 48(%rsp), %rbp ## 8-byte Reload
+ adcq 608(%rsp), %rbp
+ movq 72(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ adcq $0, 56(%rsp) ## 8-byte Folded Spill
+ adcq $0, 88(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 456(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 456(%rsp), %r14
+ movq 64(%rsp), %rax ## 8-byte Reload
+ adcq 464(%rsp), %rax
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 472(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 488(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 496(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 504(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ adcq 512(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ adcq 520(%rsp), %rbp
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 528(%rsp), %rbx
+ movq %rbx, 72(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %r14 ## 8-byte Reload
+ adcq $0, %r14
+ movq 56(%rsp), %r13 ## 8-byte Reload
+ adcq $0, %r13
+ movq 88(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, %r12
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 376(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 376(%rsp), %r15
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 384(%rsp), %rax
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 392(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 400(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 408(%rsp), %rbp
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 416(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 424(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq 72(%rsp), %r15 ## 8-byte Reload
+ adcq 440(%rsp), %r15
+ adcq 448(%rsp), %r14
+ movq %r14, 80(%rsp) ## 8-byte Spill
+ adcq $0, %r13
+ movq %r13, %r14
+ adcq $0, %rbx
+ movq %rbx, 88(%rsp) ## 8-byte Spill
+ adcq $0, %r12
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 296(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 296(%rsp), %rbx
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 312(%rsp), %r13
+ adcq 320(%rsp), %rbp
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 328(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 336(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 344(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ adcq 352(%rsp), %r15
+ movq %r15, 72(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %r15 ## 8-byte Reload
+ adcq 360(%rsp), %r15
+ adcq 368(%rsp), %r14
+ movq %r14, 56(%rsp) ## 8-byte Spill
+ movq 88(%rsp), %r14 ## 8-byte Reload
+ adcq $0, %r14
+ adcq $0, %r12
+ movq 96(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbx
+ leaq 216(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 216(%rsp), %rbx
+ movq %r13, %rsi
+ adcq 224(%rsp), %rsi
+ movq %rsi, (%rsp) ## 8-byte Spill
+ adcq 232(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r9 ## 8-byte Reload
+ adcq 240(%rsp), %r9
+ movq %r9, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r8 ## 8-byte Reload
+ adcq 248(%rsp), %r8
+ movq %r8, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rbx ## 8-byte Reload
+ adcq 256(%rsp), %rbx
+ movq 72(%rsp), %rax ## 8-byte Reload
+ adcq 264(%rsp), %rax
+ movq %r15, %rcx
+ adcq 272(%rsp), %rcx
+ movq 56(%rsp), %rdx ## 8-byte Reload
+ adcq 280(%rsp), %rdx
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ adcq 288(%rsp), %r14
+ movq %r14, %r11
+ adcq $0, %r12
+ subq 144(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rbp, %rdi
+ sbbq 136(%rsp), %rdi ## 8-byte Folded Reload
+ movq %r9, %rbp
+ sbbq 152(%rsp), %rbp ## 8-byte Folded Reload
+ movq %r8, %r13
+ sbbq 160(%rsp), %r13 ## 8-byte Folded Reload
+ movq %rbx, %r15
+ sbbq 168(%rsp), %r15 ## 8-byte Folded Reload
+ movq %rax, %r14
+ sbbq 176(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rcx, %r10
+ sbbq 184(%rsp), %r10 ## 8-byte Folded Reload
+ movq %rdx, %r8
+ sbbq 192(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r11, %r9
+ sbbq 200(%rsp), %r9 ## 8-byte Folded Reload
+ sbbq $0, %r12
+ andl $1, %r12d
+ cmovneq %r11, %r9
+ testb %r12b, %r12b
+ cmovneq (%rsp), %rsi ## 8-byte Folded Reload
+ movq 208(%rsp), %rdx ## 8-byte Reload
+ movq %rsi, (%rdx)
+ cmovneq 24(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 8(%rdx)
+ cmovneq 8(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 16(%rdx)
+ cmovneq 16(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 24(%rdx)
+ cmovneq %rbx, %r15
+ movq %r15, 32(%rdx)
+ cmovneq %rax, %r14
+ movq %r14, 40(%rdx)
+ cmovneq %rcx, %r10
+ movq %r10, 48(%rdx)
+ cmovneq 56(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 56(%rdx)
+ movq %r9, 64(%rdx)
+ addq $936, %rsp ## imm = 0x3A8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addPre9Lbmi2: ## @mcl_fp_addPre9Lbmi2
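+## 9-limb addition without reduction: stores (%rsi) + (%rdx) to (%rdi) and returns
+## the carry out (0 or 1) in %rax.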
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r8
+ movq 64(%rsi), %r15
+ movq 56(%rsi), %r9
+ movq 48(%rsi), %r10
+ movq 40(%rsi), %r11
+ movq 24(%rsi), %r12
+ movq 32(%rsi), %r14
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rcx
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rcx
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r12
+ movq 56(%rdx), %r13
+ movq 48(%rdx), %rsi
+ movq 40(%rdx), %rbp
+ movq 32(%rdx), %rdx
+ movq %rbx, (%rdi)
+ movq %rcx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r12, 24(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 32(%rdi)
+ adcq %r11, %rbp
+ movq %rbp, 40(%rdi)
+ adcq %r10, %rsi
+ movq %rsi, 48(%rdi)
+ adcq %r9, %r13
+ movq %r13, 56(%rdi)
+ adcq %r8, %r15
+ movq %r15, 64(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_subPre9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subPre9Lbmi2: ## @mcl_fp_subPre9Lbmi2
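+## 9-limb subtraction without reduction: stores (%rsi) - (%rdx) to (%rdi) and
+## returns the borrow (0 or 1) in %rax.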
+## BB#0:
+ movq 32(%rdx), %r8
+ movq (%rsi), %rcx
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ movq %rcx, (%rdi)
+ movq 8(%rsi), %rcx
+ sbbq 8(%rdx), %rcx
+ movq %rcx, 8(%rdi)
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq %rcx, 16(%rdi)
+ movq 24(%rsi), %rcx
+ sbbq 24(%rdx), %rcx
+ movq %rcx, 24(%rdi)
+ movq 32(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 40(%rdx), %r8
+ movq %rcx, 32(%rdi)
+ movq 40(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 48(%rdx), %r8
+ movq %rcx, 40(%rdi)
+ movq 48(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 56(%rdx), %r8
+ movq %rcx, 48(%rdi)
+ movq 56(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq %rcx, 56(%rdi)
+ movq 64(%rdx), %rcx
+ movq 64(%rsi), %rdx
+ sbbq %rcx, %rdx
+ movq %rdx, 64(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_shr1_9Lbmi2: ## @mcl_fp_shr1_9Lbmi2
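+## Shifts a 9-limb value right by one bit: (%rdi) = (%rsi) >> 1.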
+## BB#0:
+ pushq %rbx
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %r9
+ movq 48(%rsi), %r10
+ movq 40(%rsi), %r11
+ movq 32(%rsi), %rcx
+ movq 24(%rsi), %rdx
+ movq 16(%rsi), %rax
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rbx
+ movq %rbx, (%rdi)
+ shrdq $1, %rax, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rdx, %rax
+ movq %rax, 16(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 24(%rdi)
+ shrdq $1, %r11, %rcx
+ movq %rcx, 32(%rdi)
+ shrdq $1, %r10, %r11
+ movq %r11, 40(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 48(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 56(%rdi)
+ shrq %r8
+ movq %r8, 64(%rdi)
+ popq %rbx
+ retq
+
+ .globl _mcl_fp_add9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_add9Lbmi2: ## @mcl_fp_add9Lbmi2
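+## Modular addition: stores the raw 9-limb sum, then subtracts the modulus at (%rcx)
+## and overwrites the output with the reduced value when no borrow occurs.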
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r12
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %r13
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r10
+ movq 24(%rsi), %r14
+ movq 32(%rsi), %r11
+ movq (%rdx), %rbx
+ movq 8(%rdx), %r15
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %r15
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r14
+ adcq 32(%rdx), %r11
+ adcq 40(%rdx), %r10
+ movq 56(%rdx), %rsi
+ adcq 48(%rdx), %r9
+ movq %rbx, (%rdi)
+ movq %r15, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r14, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ adcq %r13, %rsi
+ movq %rsi, 56(%rdi)
+ adcq %r12, %r8
+ movq %r8, 64(%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %rbx
+ sbbq 8(%rcx), %r15
+ sbbq 16(%rcx), %rax
+ sbbq 24(%rcx), %r14
+ sbbq 32(%rcx), %r11
+ sbbq 40(%rcx), %r10
+ sbbq 48(%rcx), %r9
+ sbbq 56(%rcx), %rsi
+ sbbq 64(%rcx), %r8
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne LBB136_2
+## BB#1: ## %nocarry
+ movq %rbx, (%rdi)
+ movq %r15, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r14, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %rsi, 56(%rdi)
+ movq %r8, 64(%rdi)
+LBB136_2: ## %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_addNF9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_addNF9Lbmi2: ## @mcl_fp_addNF9Lbmi2
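+## Branchless modular addition: computes both the sum and sum - p (p at (%rcx)),
+## then selects between them with cmovs based on the sign of the final borrow.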
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, %r8
+ movq 64(%rdx), %r10
+ movq 56(%rdx), %r11
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rax
+ movq 32(%rdx), %rdi
+ movq 24(%rdx), %rbp
+ movq 16(%rdx), %r15
+ movq (%rdx), %rbx
+ movq 8(%rdx), %r13
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %r13
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %rbp
+ movq %rbp, -24(%rsp) ## 8-byte Spill
+ adcq 32(%rsi), %rdi
+ movq %rdi, -40(%rsp) ## 8-byte Spill
+ adcq 40(%rsi), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %r9
+ movq %r9, %rdi
+ movq %rdi, -16(%rsp) ## 8-byte Spill
+ adcq 56(%rsi), %r11
+ movq %r11, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ adcq 64(%rsi), %r10
+ movq %r10, %r9
+ movq %rbx, %rsi
+ subq (%rcx), %rsi
+ movq %r13, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r15, %r12
+ sbbq 16(%rcx), %r12
+ sbbq 24(%rcx), %rbp
+ movq -40(%rsp), %r14 ## 8-byte Reload
+ sbbq 32(%rcx), %r14
+ movq -32(%rsp), %r11 ## 8-byte Reload
+ sbbq 40(%rcx), %r11
+ movq %rdi, %r10
+ sbbq 48(%rcx), %r10
+ movq %rax, %rdi
+ sbbq 56(%rcx), %rdi
+ movq %r9, %rax
+ sbbq 64(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %rbx, %rsi
+ movq %rsi, (%r8)
+ cmovsq %r13, %rdx
+ movq %rdx, 8(%r8)
+ cmovsq %r15, %r12
+ movq %r12, 16(%r8)
+ cmovsq -24(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 24(%r8)
+ cmovsq -40(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, 32(%r8)
+ cmovsq -32(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 40(%r8)
+ cmovsq -16(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 48(%r8)
+ cmovsq -8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%r8)
+ cmovsq %r9, %rax
+ movq %rax, 64(%r8)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_sub9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_sub9Lbmi2: ## @mcl_fp_sub9Lbmi2
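+## Modular subtraction: stores the raw 9-limb difference; if the subtraction
+## borrows, the modulus at (%rcx) is added back on the "carry" path.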
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r13
+ movq (%rsi), %rax
+ movq 8(%rsi), %r9
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r9
+ movq 16(%rsi), %r10
+ sbbq 16(%rdx), %r10
+ movq 24(%rsi), %r11
+ sbbq 24(%rdx), %r11
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 40(%rsi), %r14
+ sbbq 40(%rdx), %r14
+ movq 48(%rsi), %r15
+ sbbq 48(%rdx), %r15
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %rsi
+ sbbq 56(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r12, 32(%rdi)
+ movq %r14, 40(%rdi)
+ movq %r15, 48(%rdi)
+ movq %rsi, 56(%rdi)
+ sbbq %r13, %r8
+ movq %r8, 64(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB138_2
+## BB#1: ## %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ movq 8(%rcx), %rax
+ adcq %r9, %rax
+ movq %rax, 8(%rdi)
+ movq 16(%rcx), %rax
+ adcq %r10, %rax
+ movq %rax, 16(%rdi)
+ movq 24(%rcx), %rax
+ adcq %r11, %rax
+ movq %rax, 24(%rdi)
+ movq 32(%rcx), %rax
+ adcq %r12, %rax
+ movq %rax, 32(%rdi)
+ movq 40(%rcx), %rax
+ adcq %r14, %rax
+ movq %rax, 40(%rdi)
+ movq 48(%rcx), %rax
+ adcq %r15, %rax
+ movq %rax, 48(%rdi)
+ movq 56(%rcx), %rax
+ adcq %rsi, %rax
+ movq %rax, 56(%rdi)
+ movq 64(%rcx), %rax
+ adcq %r8, %rax
+ movq %rax, 64(%rdi)
+LBB138_2: ## %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_subNF9Lbmi2
+ .p2align 4, 0x90
+_mcl_fp_subNF9Lbmi2: ## @mcl_fp_subNF9Lbmi2
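+## Branchless modular subtraction: forms the raw difference (operands loaded with
+## SSE moves), derives an all-ones/zero mask from its sign, and adds the modulus at
+## (%rcx) masked by that sign so a negative difference wraps back into range.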
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r10
+ movq %rdi, %rbx
+ movq 64(%rsi), %r11
+ movdqu (%rdx), %xmm1
+ movdqu 16(%rdx), %xmm2
+ movdqu 32(%rdx), %xmm3
+ movdqu 48(%rdx), %xmm4
+ pshufd $78, %xmm4, %xmm0 ## xmm0 = xmm4[2,3,0,1]
+ movd %xmm0, %r8
+ movdqu (%rsi), %xmm5
+ movdqu 16(%rsi), %xmm6
+ movdqu 32(%rsi), %xmm7
+ movdqu 48(%rsi), %xmm8
+ pshufd $78, %xmm8, %xmm0 ## xmm0 = xmm8[2,3,0,1]
+ movd %xmm0, %rax
+ movd %xmm4, %r9
+ pshufd $78, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,0,1]
+ movd %xmm0, %rdi
+ pshufd $78, %xmm7, %xmm0 ## xmm0 = xmm7[2,3,0,1]
+ movd %xmm3, %rcx
+ pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1]
+ movd %xmm3, %rbp
+ pshufd $78, %xmm6, %xmm3 ## xmm3 = xmm6[2,3,0,1]
+ movd %xmm2, %r13
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r12
+ pshufd $78, %xmm5, %xmm2 ## xmm2 = xmm5[2,3,0,1]
+ movd %xmm1, %rsi
+ movd %xmm5, %r15
+ subq %rsi, %r15
+ movd %xmm2, %r14
+ sbbq %r12, %r14
+ movd %xmm6, %r12
+ sbbq %r13, %r12
+ movd %xmm3, %r13
+ sbbq %rbp, %r13
+ movd %xmm7, %rsi
+ sbbq %rcx, %rsi
+ movq %rsi, -16(%rsp) ## 8-byte Spill
+ movd %xmm0, %rcx
+ sbbq %rdi, %rcx
+ movq %rcx, -24(%rsp) ## 8-byte Spill
+ movd %xmm8, %rcx
+ sbbq %r9, %rcx
+ movq %rcx, -32(%rsp) ## 8-byte Spill
+ sbbq %r8, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ sbbq 64(%rdx), %r11
+ movq %r11, -40(%rsp) ## 8-byte Spill
+ movq %r11, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rbp
+ shldq $1, %r11, %rbp
+ movq 24(%r10), %r9
+ andq %rbp, %r9
+ movq 8(%r10), %rdi
+ andq %rbp, %rdi
+ andq (%r10), %rbp
+ movq 64(%r10), %r11
+ andq %rdx, %r11
+ rorxq $63, %rdx, %rax
+ andq 56(%r10), %rdx
+ movq 48(%r10), %r8
+ andq %rax, %r8
+ movq 40(%r10), %rsi
+ andq %rax, %rsi
+ movq 32(%r10), %rcx
+ andq %rax, %rcx
+ andq 16(%r10), %rax
+ addq %r15, %rbp
+ adcq %r14, %rdi
+ movq %rbp, (%rbx)
+ adcq %r12, %rax
+ movq %rdi, 8(%rbx)
+ adcq %r13, %r9
+ movq %rax, 16(%rbx)
+ movq %r9, 24(%rbx)
+ adcq -16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 32(%rbx)
+ adcq -24(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 40(%rbx)
+ adcq -32(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 48(%rbx)
+ adcq -8(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 56(%rbx)
+ adcq -40(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 64(%rbx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_add9Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_add9Lbmi2: ## @mcl_fpDbl_add9Lbmi2
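+## Double-width (18-limb) addition: the low nine limbs of the sum are stored as-is
+## and the high half is reduced with one conditional subtraction of the modulus at (%rcx).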
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r15
+ movq 136(%rdx), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq 128(%rdx), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq 120(%rdx), %r10
+ movq 112(%rdx), %r11
+ movq 24(%rsi), %rcx
+ movq 32(%rsi), %r14
+ movq 16(%rdx), %rbp
+ movq (%rdx), %rax
+ movq 8(%rdx), %rbx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %rbp
+ adcq 24(%rdx), %rcx
+ adcq 32(%rdx), %r14
+ movq 104(%rdx), %r9
+ movq 96(%rdx), %r13
+ movq %rax, (%rdi)
+ movq 88(%rdx), %r8
+ movq %rbx, 8(%rdi)
+ movq 80(%rdx), %r12
+ movq %rbp, 16(%rdi)
+ movq 40(%rdx), %rax
+ movq %rcx, 24(%rdi)
+ movq 40(%rsi), %rbp
+ adcq %rax, %rbp
+ movq 48(%rdx), %rcx
+ movq %r14, 32(%rdi)
+ movq 48(%rsi), %rax
+ adcq %rcx, %rax
+ movq 56(%rdx), %r14
+ movq %rbp, 40(%rdi)
+ movq 56(%rsi), %rbp
+ adcq %r14, %rbp
+ movq 72(%rdx), %rcx
+ movq 64(%rdx), %rdx
+ movq %rax, 48(%rdi)
+ movq 64(%rsi), %rax
+ adcq %rdx, %rax
+ movq 136(%rsi), %rbx
+ movq %rbp, 56(%rdi)
+ movq 72(%rsi), %rbp
+ adcq %rcx, %rbp
+ movq 128(%rsi), %rcx
+ movq %rax, 64(%rdi)
+ movq 80(%rsi), %rdx
+ adcq %r12, %rdx
+ movq 88(%rsi), %r12
+ adcq %r8, %r12
+ movq 96(%rsi), %r14
+ adcq %r13, %r14
+ movq %r14, -8(%rsp) ## 8-byte Spill
+ movq 104(%rsi), %rax
+ adcq %r9, %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq 120(%rsi), %rax
+ movq 112(%rsi), %rsi
+ adcq %r11, %rsi
+ movq %rsi, -24(%rsp) ## 8-byte Spill
+ adcq %r10, %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ adcq -40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -40(%rsp) ## 8-byte Spill
+ adcq -48(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -48(%rsp) ## 8-byte Spill
+ sbbq %r9, %r9
+ andl $1, %r9d
+ movq %rbp, %r10
+ subq (%r15), %r10
+ movq %rdx, %r11
+ sbbq 8(%r15), %r11
+ movq %r12, %rbx
+ sbbq 16(%r15), %rbx
+ sbbq 24(%r15), %r14
+ movq -32(%rsp), %r13 ## 8-byte Reload
+ sbbq 32(%r15), %r13
+ movq -24(%rsp), %rsi ## 8-byte Reload
+ sbbq 40(%r15), %rsi
+ movq -16(%rsp), %rax ## 8-byte Reload
+ sbbq 48(%r15), %rax
+ sbbq 56(%r15), %rcx
+ movq -48(%rsp), %r8 ## 8-byte Reload
+ sbbq 64(%r15), %r8
+ sbbq $0, %r9
+ andl $1, %r9d
+ cmovneq %rbp, %r10
+ movq %r10, 72(%rdi)
+ testb %r9b, %r9b
+ cmovneq %rdx, %r11
+ movq %r11, 80(%rdi)
+ cmovneq %r12, %rbx
+ movq %rbx, 88(%rdi)
+ cmovneq -8(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, 96(%rdi)
+ cmovneq -32(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 104(%rdi)
+ cmovneq -24(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 112(%rdi)
+ cmovneq -16(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 120(%rdi)
+ cmovneq -40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 128(%rdi)
+ cmovneq -48(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 136(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sub9Lbmi2
+ .p2align 4, 0x90
+_mcl_fpDbl_sub9Lbmi2: ## @mcl_fpDbl_sub9Lbmi2
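+## Double-width (18-limb) subtraction: the low nine limbs are stored as-is and the
+## modulus at (%rcx) is conditionally added to the high half if the subtraction borrows.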
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r14
+ movq 136(%rdx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq 128(%rdx), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq 120(%rdx), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r11
+ movq (%rsi), %r12
+ movq 8(%rsi), %r13
+ xorl %r9d, %r9d
+ subq (%rdx), %r12
+ sbbq 8(%rdx), %r13
+ sbbq 16(%rdx), %r11
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %rbp
+ sbbq 32(%rdx), %rbp
+ movq 112(%rdx), %r10
+ movq 104(%rdx), %rcx
+ movq %r12, (%rdi)
+ movq 96(%rdx), %rax
+ movq %r13, 8(%rdi)
+ movq 88(%rdx), %r13
+ movq %r11, 16(%rdi)
+ movq 40(%rdx), %r11
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 48(%rdx), %r11
+ movq %rbp, 32(%rdi)
+ movq 48(%rsi), %rbp
+ sbbq %r11, %rbp
+ movq 56(%rdx), %r11
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 64(%rdx), %r11
+ movq %rbp, 48(%rdi)
+ movq 64(%rsi), %rbp
+ sbbq %r11, %rbp
+ movq 80(%rdx), %r8
+ movq 72(%rdx), %r11
+ movq %rbx, 56(%rdi)
+ movq 72(%rsi), %r15
+ sbbq %r11, %r15
+ movq 136(%rsi), %rdx
+ movq %rbp, 64(%rdi)
+ movq 80(%rsi), %rbp
+ sbbq %r8, %rbp
+ movq 88(%rsi), %r12
+ sbbq %r13, %r12
+ movq 96(%rsi), %r13
+ sbbq %rax, %r13
+ movq 104(%rsi), %rax
+ sbbq %rcx, %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq 112(%rsi), %rax
+ sbbq %r10, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 128(%rsi), %rax
+ movq 120(%rsi), %rcx
+ sbbq -40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -40(%rsp) ## 8-byte Spill
+ sbbq -32(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ sbbq -24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movl $0, %r8d
+ sbbq $0, %r8
+ andl $1, %r8d
+ movq (%r14), %r10
+ cmoveq %r9, %r10
+ testb %r8b, %r8b
+ movq 16(%r14), %r8
+ cmoveq %r9, %r8
+ movq 8(%r14), %rdx
+ cmoveq %r9, %rdx
+ movq 64(%r14), %rbx
+ cmoveq %r9, %rbx
+ movq 56(%r14), %r11
+ cmoveq %r9, %r11
+ movq 48(%r14), %rsi
+ cmoveq %r9, %rsi
+ movq 40(%r14), %rcx
+ cmoveq %r9, %rcx
+ movq 32(%r14), %rax
+ cmoveq %r9, %rax
+ cmovneq 24(%r14), %r9
+ addq %r15, %r10
+ adcq %rbp, %rdx
+ movq %r10, 72(%rdi)
+ adcq %r12, %r8
+ movq %rdx, 80(%rdi)
+ adcq %r13, %r9
+ movq %r8, 88(%rdi)
+ movq %r9, 96(%rdi)
+ adcq -16(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 104(%rdi)
+ adcq -8(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 112(%rdi)
+ adcq -40(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 120(%rdi)
+ adcq -32(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 128(%rdi)
+ adcq -24(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 136(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+
+.subsections_via_symbols
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/x86-64mac.s b/vendor/github.com/tangerine-network/mcl/src/asm/x86-64mac.s
new file mode 100644
index 000000000..0dc7014a3
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/x86-64mac.s
@@ -0,0 +1,16313 @@
+ .section __TEXT,__text,regular,pure_instructions
+ .macosx_version_min 10, 12
+ .globl _makeNIST_P192L
+ .p2align 4, 0x90
+_makeNIST_P192L: ## @makeNIST_P192L
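+## Returns the NIST P-192 prime 2^192 - 2^64 - 1 as three limbs in %rax, %rdx, %rcx.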
+## BB#0:
+ movq $-1, %rax
+ movq $-2, %rdx
+ movq $-1, %rcx
+ retq
+
+ .globl _mcl_fpDbl_mod_NIST_P192L
+ .p2align 4, 0x90
+_mcl_fpDbl_mod_NIST_P192L: ## @mcl_fpDbl_mod_NIST_P192L
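+## Reduces a 384-bit value modulo P-192 = 2^192 - 2^64 - 1 by folding the high
+## limbs back into the low ones, followed by a final conditional subtraction.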
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 16(%rsi), %r10
+ movq 24(%rsi), %r8
+ movq 40(%rsi), %r9
+ movq 8(%rsi), %rax
+ addq %r9, %rax
+ adcq $0, %r10
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ movq 32(%rsi), %r11
+ movq (%rsi), %r14
+ addq %r8, %r14
+ adcq %r11, %rax
+ adcq %r9, %r10
+ adcq $0, %rcx
+ addq %r9, %r14
+ adcq %r8, %rax
+ adcq %r11, %r10
+ adcq $0, %rcx
+ addq %rcx, %r14
+ adcq %rax, %rcx
+ adcq $0, %r10
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r14, %rsi
+ addq $1, %rsi
+ movq %rcx, %rdx
+ adcq $1, %rdx
+ movq %r10, %rbx
+ adcq $0, %rbx
+ adcq $-1, %rax
+ andl $1, %eax
+ cmovneq %r14, %rsi
+ movq %rsi, (%rdi)
+ testb %al, %al
+ cmovneq %rcx, %rdx
+ movq %rdx, 8(%rdi)
+ cmovneq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fp_sqr_NIST_P192L
+ .p2align 4, 0x90
+_mcl_fp_sqr_NIST_P192L: ## @mcl_fp_sqr_NIST_P192L
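+## Squares a 192-bit value with 64x64-bit mulq steps and reduces the 384-bit
+## result modulo P-192 using the same folding scheme.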
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r11
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rcx
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %rdi
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %r15
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq %rbx
+ movq %rax, %r13
+ movq %rdx, %rcx
+ addq %rcx, %r12
+ adcq %r14, %r15
+ movq %rdi, %r10
+ adcq $0, %r10
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %r9
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rax, %r8
+ movq %rdx, %rsi
+ addq %r13, %rsi
+ adcq %rbp, %rcx
+ movq %r9, %rbx
+ adcq $0, %rbx
+ addq %r13, %rsi
+ adcq %r12, %rcx
+ adcq %r15, %rbx
+ adcq $0, %r10
+ movq %r11, %rax
+ mulq %r11
+ addq %r14, %r9
+ adcq %rdi, %rax
+ adcq $0, %rdx
+ addq %rbp, %rcx
+ adcq %rbx, %r9
+ adcq %r10, %rax
+ adcq $0, %rdx
+ addq %rdx, %rsi
+ adcq $0, %rcx
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq %r9, %r8
+ adcq %rax, %rsi
+ adcq %rdx, %rcx
+ adcq $0, %rbp
+ addq %rdx, %r8
+ adcq %r9, %rsi
+ adcq %rax, %rcx
+ adcq $0, %rbp
+ addq %rbp, %r8
+ adcq %rsi, %rbp
+ adcq $0, %rcx
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r8, %rdx
+ addq $1, %rdx
+ movq %rbp, %rsi
+ adcq $1, %rsi
+ movq %rcx, %rdi
+ adcq $0, %rdi
+ adcq $-1, %rax
+ andl $1, %eax
+ cmovneq %r8, %rdx
+ movq -8(%rsp), %rbx ## 8-byte Reload
+ movq %rdx, (%rbx)
+ testb %al, %al
+ cmovneq %rbp, %rsi
+ movq %rsi, 8(%rbx)
+ cmovneq %rcx, %rdi
+ movq %rdi, 16(%rbx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mulNIST_P192L
+ .p2align 4, 0x90
+_mcl_fp_mulNIST_P192L: ## @mcl_fp_mulNIST_P192L
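+## Multiplies two 192-bit values via _mcl_fpDbl_mulPre3L and reduces the product
+## modulo P-192.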
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ subq $56, %rsp
+ movq %rdi, %r14
+ leaq 8(%rsp), %rdi
+ callq _mcl_fpDbl_mulPre3L
+ movq 24(%rsp), %r9
+ movq 32(%rsp), %r8
+ movq 48(%rsp), %rdi
+ movq 16(%rsp), %rbx
+ addq %rdi, %rbx
+ adcq $0, %r9
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ movq 40(%rsp), %rsi
+ movq 8(%rsp), %rdx
+ addq %r8, %rdx
+ adcq %rsi, %rbx
+ adcq %rdi, %r9
+ adcq $0, %rcx
+ addq %rdi, %rdx
+ adcq %r8, %rbx
+ adcq %rsi, %r9
+ adcq $0, %rcx
+ addq %rcx, %rdx
+ adcq %rbx, %rcx
+ adcq $0, %r9
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rdx, %rdi
+ addq $1, %rdi
+ movq %rcx, %rbx
+ adcq $1, %rbx
+ movq %r9, %rax
+ adcq $0, %rax
+ adcq $-1, %rsi
+ andl $1, %esi
+ cmovneq %rdx, %rdi
+ movq %rdi, (%r14)
+ testb %sil, %sil
+ cmovneq %rcx, %rbx
+ movq %rbx, 8(%r14)
+ cmovneq %r9, %rax
+ movq %rax, 16(%r14)
+ addq $56, %rsp
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fpDbl_mod_NIST_P521L
+ .p2align 4, 0x90
+_mcl_fpDbl_mod_NIST_P521L: ## @mcl_fpDbl_mod_NIST_P521L
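+## Reduces a double-width value modulo P-521 = 2^521 - 1: the bits above 2^521 are
+## shifted down and added to the low 521 bits, then the result is normalized (an
+## all-ones value, i.e. p itself, is mapped to zero).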
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 120(%rsi), %r9
+ movq 128(%rsi), %r14
+ movq %r14, %r8
+ shldq $55, %r9, %r8
+ movq 112(%rsi), %r10
+ shldq $55, %r10, %r9
+ movq 104(%rsi), %r11
+ shldq $55, %r11, %r10
+ movq 96(%rsi), %r15
+ shldq $55, %r15, %r11
+ movq 88(%rsi), %r12
+ shldq $55, %r12, %r15
+ movq 80(%rsi), %rcx
+ shldq $55, %rcx, %r12
+ movq 64(%rsi), %rbx
+ movq 72(%rsi), %rax
+ shldq $55, %rax, %rcx
+ shrq $9, %r14
+ shldq $55, %rbx, %rax
+ ## kill: %EBX<def> %EBX<kill> %RBX<kill> %RBX<def>
+ andl $511, %ebx ## imm = 0x1FF
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r12
+ adcq 24(%rsi), %r15
+ adcq 32(%rsi), %r11
+ adcq 40(%rsi), %r10
+ adcq 48(%rsi), %r9
+ adcq 56(%rsi), %r8
+ adcq %r14, %rbx
+ movl %ebx, %esi
+ shrl $9, %esi
+ andl $1, %esi
+ addq %rax, %rsi
+ adcq $0, %rcx
+ adcq $0, %r12
+ adcq $0, %r15
+ adcq $0, %r11
+ adcq $0, %r10
+ adcq $0, %r9
+ adcq $0, %r8
+ adcq $0, %rbx
+ movq %rsi, %rax
+ andq %r12, %rax
+ andq %r15, %rax
+ andq %r11, %rax
+ andq %r10, %rax
+ andq %r9, %rax
+ andq %r8, %rax
+ movq %rbx, %rdx
+ orq $-512, %rdx ## imm = 0xFE00
+ andq %rax, %rdx
+ andq %rcx, %rdx
+ cmpq $-1, %rdx
+ je LBB4_1
+## BB#3: ## %nonzero
+ movq %rsi, (%rdi)
+ movq %rcx, 8(%rdi)
+ movq %r12, 16(%rdi)
+ movq %r15, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %r8, 56(%rdi)
+ andl $511, %ebx ## imm = 0x1FF
+ movq %rbx, 64(%rdi)
+ jmp LBB4_2
+LBB4_1: ## %zero
+ movq $0, 64(%rdi)
+ movq $0, 56(%rdi)
+ movq $0, 48(%rdi)
+ movq $0, 40(%rdi)
+ movq $0, 32(%rdi)
+ movq $0, 24(%rdi)
+ movq $0, 16(%rdi)
+ movq $0, 8(%rdi)
+ movq $0, (%rdi)
+LBB4_2: ## %zero
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_mulUnitPre1L
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre1L: ## @mcl_fp_mulUnitPre1L
+## BB#0:
+ movq %rdx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_mulPre1L
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre1L: ## @mcl_fpDbl_mulPre1L
+## BB#0:
+ movq (%rdx), %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_sqrPre1L
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre1L: ## @mcl_fpDbl_sqrPre1L
+## BB#0:
+ movq (%rsi), %rax
+ mulq %rax
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ retq
+
+ .globl _mcl_fp_mont1L
+ .p2align 4, 0x90
+_mcl_fp_mont1L: ## @mcl_fp_mont1L
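+## Single-limb Montgomery multiplication: t = a*b, m = t * (word at -8(%rcx), i.e.
+## -p^-1 mod 2^64), result = (t + m*p) / 2^64, with one conditional subtraction of p.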
+## BB#0:
+ movq (%rsi), %rax
+ mulq (%rdx)
+ movq %rax, %rsi
+ movq %rdx, %r8
+ movq -8(%rcx), %rax
+ imulq %rsi, %rax
+ movq (%rcx), %rcx
+ mulq %rcx
+ addq %rsi, %rax
+ adcq %r8, %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rsi
+ subq %rcx, %rsi
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rsi
+ movq %rsi, (%rdi)
+ retq
+
+ .globl _mcl_fp_montNF1L
+ .p2align 4, 0x90
+_mcl_fp_montNF1L: ## @mcl_fp_montNF1L
+## BB#0:
+ movq (%rsi), %rax
+ mulq (%rdx)
+ movq %rax, %rsi
+ movq %rdx, %r8
+ movq -8(%rcx), %rax
+ imulq %rsi, %rax
+ movq (%rcx), %rcx
+ mulq %rcx
+ addq %rsi, %rax
+ adcq %r8, %rdx
+ movq %rdx, %rax
+ subq %rcx, %rax
+ cmovsq %rdx, %rax
+ movq %rax, (%rdi)
+ retq
+
+ .globl _mcl_fp_montRed1L
+ .p2align 4, 0x90
+_mcl_fp_montRed1L: ## @mcl_fp_montRed1L
+## BB#0:
+ movq (%rsi), %rcx
+ movq -8(%rdx), %rax
+ imulq %rcx, %rax
+ movq (%rdx), %r8
+ mulq %r8
+ addq %rcx, %rax
+ adcq 8(%rsi), %rdx
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rcx
+ subq %r8, %rcx
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rcx
+ movq %rcx, (%rdi)
+ retq
+
+ .globl _mcl_fp_addPre1L
+ .p2align 4, 0x90
+_mcl_fp_addPre1L: ## @mcl_fp_addPre1L
+## BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, (%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_subPre1L
+ .p2align 4, 0x90
+_mcl_fp_subPre1L: ## @mcl_fp_subPre1L
+## BB#0:
+ movq (%rsi), %rcx
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ movq %rcx, (%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_1L
+ .p2align 4, 0x90
+_mcl_fp_shr1_1L: ## @mcl_fp_shr1_1L
+## BB#0:
+ movq (%rsi), %rax
+ shrq %rax
+ movq %rax, (%rdi)
+ retq
+
+ .globl _mcl_fp_add1L
+ .p2align 4, 0x90
+_mcl_fp_add1L: ## @mcl_fp_add1L
+## BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, (%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %rax
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne LBB14_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+LBB14_2: ## %carry
+ retq
+
+ .globl _mcl_fp_addNF1L
+ .p2align 4, 0x90
+_mcl_fp_addNF1L: ## @mcl_fp_addNF1L
+## BB#0:
+ movq (%rdx), %rax
+ addq (%rsi), %rax
+ movq %rax, %rdx
+ subq (%rcx), %rdx
+ cmovsq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+
+ .globl _mcl_fp_sub1L
+ .p2align 4, 0x90
+_mcl_fp_sub1L: ## @mcl_fp_sub1L
+## BB#0:
+ movq (%rsi), %rax
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ movq %rax, (%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB16_2
+## BB#1: ## %nocarry
+ retq
+LBB16_2: ## %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ retq
+
+ .globl _mcl_fp_subNF1L
+ .p2align 4, 0x90
+_mcl_fp_subNF1L: ## @mcl_fp_subNF1L
+## BB#0:
+ movq (%rsi), %rax
+ subq (%rdx), %rax
+ movq %rax, %rdx
+ sarq $63, %rdx
+ andq (%rcx), %rdx
+ addq %rax, %rdx
+ movq %rdx, (%rdi)
+ retq
+
+ .globl _mcl_fpDbl_add1L
+ .p2align 4, 0x90
+_mcl_fpDbl_add1L: ## @mcl_fpDbl_add1L
+## BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ movq %rax, (%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rdx, %rsi
+ subq (%rcx), %rsi
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_sub1L
+ .p2align 4, 0x90
+_mcl_fpDbl_sub1L: ## @mcl_fpDbl_sub1L
+## BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %r8
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r8
+ movq %rax, (%rdi)
+ movl $0, %eax
+ sbbq $0, %rax
+ testb $1, %al
+ cmovneq (%rcx), %rsi
+ addq %r8, %rsi
+ movq %rsi, 8(%rdi)
+ retq
+
+ .globl _mcl_fp_mulUnitPre2L
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre2L: ## @mcl_fp_mulUnitPre2L
+## BB#0:
+ movq %rdx, %r8
+ movq %r8, %rax
+ mulq 8(%rsi)
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %r8, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq $0, %rcx
+ movq %rcx, 16(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_mulPre2L
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre2L: ## @mcl_fpDbl_mulPre2L
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ movq %rdx, %r10
+ movq (%rsi), %r8
+ movq 8(%rsi), %r11
+ movq (%r10), %rcx
+ movq %r8, %rax
+ mulq %rcx
+ movq %rdx, %r9
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %r14
+ movq %rax, %rsi
+ addq %r9, %rsi
+ adcq $0, %r14
+ movq 8(%r10), %rbx
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %r9
+ movq %rax, %rcx
+ movq %r8, %rax
+ mulq %rbx
+ addq %rsi, %rax
+ movq %rax, 8(%rdi)
+ adcq %r14, %rcx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rcx
+ movq %rcx, 16(%rdi)
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fpDbl_sqrPre2L
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre2L: ## @mcl_fpDbl_sqrPre2L
+## BB#0:
+ movq (%rsi), %rcx
+ movq 8(%rsi), %r8
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %rsi
+ movq %rax, (%rdi)
+ movq %r8, %rax
+ mulq %rcx
+ movq %rdx, %r9
+ movq %rax, %r10
+ addq %r10, %rsi
+ movq %r9, %rcx
+ adcq $0, %rcx
+ movq %r8, %rax
+ mulq %r8
+ addq %r10, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %rcx, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r9, %rax
+ movq %rax, 16(%rdi)
+ adcq %rdx, %rcx
+ movq %rcx, 24(%rdi)
+ retq
+
+ .globl _mcl_fp_mont2L
+ .p2align 4, 0x90
+_mcl_fp_mont2L: ## @mcl_fp_mont2L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq (%rsi), %r8
+ movq 8(%rsi), %r11
+ movq (%rdx), %rsi
+ movq 8(%rdx), %r9
+ movq %r11, %rax
+ mulq %rsi
+ movq %rdx, %r15
+ movq %rax, %r10
+ movq %r8, %rax
+ mulq %rsi
+ movq %rax, %r14
+ movq %rdx, %r13
+ addq %r10, %r13
+ adcq $0, %r15
+ movq -8(%rcx), %r10
+ movq (%rcx), %rbp
+ movq %r14, %rsi
+ imulq %r10, %rsi
+ movq 8(%rcx), %rdi
+ movq %rsi, %rax
+ mulq %rdi
+ movq %rdx, %rcx
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %rbx
+ addq %r12, %rbx
+ adcq $0, %rcx
+ addq %r14, %rax
+ adcq %r13, %rbx
+ adcq %r15, %rcx
+ sbbq %r15, %r15
+ andl $1, %r15d
+ movq %r9, %rax
+ mulq %r11
+ movq %rdx, %r14
+ movq %rax, %r11
+ movq %r9, %rax
+ mulq %r8
+ movq %rax, %r8
+ movq %rdx, %rsi
+ addq %r11, %rsi
+ adcq $0, %r14
+ addq %rbx, %r8
+ adcq %rcx, %rsi
+ adcq %r15, %r14
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ imulq %r8, %r10
+ movq %r10, %rax
+ mulq %rdi
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %r10, %rax
+ mulq %rbp
+ addq %r9, %rdx
+ adcq $0, %rcx
+ addq %r8, %rax
+ adcq %rsi, %rdx
+ adcq %r14, %rcx
+ adcq $0, %rbx
+ movq %rdx, %rax
+ subq %rbp, %rax
+ movq %rcx, %rsi
+ sbbq %rdi, %rsi
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %rcx, %rsi
+ testb %bl, %bl
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rcx ## 8-byte Reload
+ movq %rax, (%rcx)
+ movq %rsi, 8(%rcx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF2L
+ .p2align 4, 0x90
+_mcl_fp_montNF2L: ## @mcl_fp_montNF2L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq (%rsi), %r8
+ movq 8(%rsi), %r11
+ movq (%rdx), %rbp
+ movq 8(%rdx), %r9
+ movq %r8, %rax
+ mulq %rbp
+ movq %rax, %rsi
+ movq %rdx, %r14
+ movq -8(%rcx), %r10
+ movq (%rcx), %r15
+ movq %rsi, %rbx
+ imulq %r10, %rbx
+ movq 8(%rcx), %rdi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq %r15
+ movq %rdx, %r12
+ movq %rax, %rbx
+ movq %r11, %rax
+ mulq %rbp
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ addq %r14, %rbp
+ adcq $0, %rcx
+ addq %rsi, %rbx
+ adcq %r13, %rbp
+ adcq $0, %rcx
+ addq %r12, %rbp
+ adcq -16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %r9, %rax
+ mulq %r11
+ movq %rdx, %rsi
+ movq %rax, %r11
+ movq %r9, %rax
+ mulq %r8
+ movq %rax, %r8
+ movq %rdx, %rbx
+ addq %r11, %rbx
+ adcq $0, %rsi
+ addq %rbp, %r8
+ adcq %rcx, %rbx
+ adcq $0, %rsi
+ imulq %r8, %r10
+ movq %r10, %rax
+ mulq %rdi
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ movq %r10, %rax
+ mulq %r15
+ addq %r8, %rax
+ adcq %rbx, %rbp
+ adcq $0, %rsi
+ addq %rdx, %rbp
+ adcq %rcx, %rsi
+ movq %rbp, %rax
+ subq %r15, %rax
+ movq %rsi, %rcx
+ sbbq %rdi, %rcx
+ cmovsq %rbp, %rax
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ movq %rax, (%rdx)
+ cmovsq %rsi, %rcx
+ movq %rcx, 8(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed2L
+ .p2align 4, 0x90
+_mcl_fp_montRed2L: ## @mcl_fp_montRed2L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq -8(%rdx), %r9
+ movq (%rdx), %r11
+ movq (%rsi), %rbx
+ movq %rbx, %rcx
+ imulq %r9, %rcx
+ movq 8(%rdx), %r14
+ movq %rcx, %rax
+ mulq %r14
+ movq %rdx, %r8
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq %r11
+ movq %rdx, %rcx
+ addq %r10, %rcx
+ adcq $0, %r8
+ movq 24(%rsi), %r15
+ addq %rbx, %rax
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r8
+ adcq $0, %r15
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ imulq %rcx, %r9
+ movq %r9, %rax
+ mulq %r14
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %r9, %rax
+ mulq %r11
+ addq %r10, %rdx
+ adcq $0, %rsi
+ addq %rcx, %rax
+ adcq %r8, %rdx
+ adcq %r15, %rsi
+ adcq $0, %rbx
+ movq %rdx, %rax
+ subq %r11, %rax
+ movq %rsi, %rcx
+ sbbq %r14, %rcx
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %rsi, %rcx
+ testb %bl, %bl
+ cmovneq %rdx, %rax
+ movq %rax, (%rdi)
+ movq %rcx, 8(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_addPre2L
+ .p2align 4, 0x90
+_mcl_fp_addPre2L: ## @mcl_fp_addPre2L
+## BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rcx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rcx
+ movq %rax, (%rdi)
+ movq %rcx, 8(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_subPre2L
+ .p2align 4, 0x90
+_mcl_fp_subPre2L: ## @mcl_fp_subPre2L
+## BB#0:
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_2L
+ .p2align 4, 0x90
+_mcl_fp_shr1_2L: ## @mcl_fp_shr1_2L
+## BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %rcx
+ shrdq $1, %rcx, %rax
+ movq %rax, (%rdi)
+ shrq %rcx
+ movq %rcx, 8(%rdi)
+ retq
+
+ .globl _mcl_fp_add2L
+ .p2align 4, 0x90
+_mcl_fp_add2L: ## @mcl_fp_add2L
+## BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB29_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+LBB29_2: ## %carry
+ retq
+
+ .globl _mcl_fp_addNF2L
+ .p2align 4, 0x90
+_mcl_fp_addNF2L: ## @mcl_fp_addNF2L
+## BB#0:
+ movq (%rdx), %rax
+ movq 8(%rdx), %r8
+ addq (%rsi), %rax
+ adcq 8(%rsi), %r8
+ movq %rax, %rsi
+ subq (%rcx), %rsi
+ movq %r8, %rdx
+ sbbq 8(%rcx), %rdx
+ testq %rdx, %rdx
+ cmovsq %rax, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r8, %rdx
+ movq %rdx, 8(%rdi)
+ retq
+
+ .globl _mcl_fp_sub2L
+ .p2align 4, 0x90
+_mcl_fp_sub2L: ## @mcl_fp_sub2L
+## BB#0:
+ movq (%rsi), %rax
+ movq 8(%rsi), %r8
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r8
+ movq %rax, (%rdi)
+ movq %r8, 8(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB31_2
+## BB#1: ## %nocarry
+ retq
+LBB31_2: ## %carry
+ movq 8(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r8, %rdx
+ movq %rdx, 8(%rdi)
+ retq
+
+ .globl _mcl_fp_subNF2L
+ .p2align 4, 0x90
+_mcl_fp_subNF2L: ## @mcl_fp_subNF2L
+## BB#0:
+ movq (%rsi), %r8
+ movq 8(%rsi), %rsi
+ subq (%rdx), %r8
+ sbbq 8(%rdx), %rsi
+ movq %rsi, %rdx
+ sarq $63, %rdx
+ movq 8(%rcx), %rax
+ andq %rdx, %rax
+ andq (%rcx), %rdx
+ addq %r8, %rdx
+ movq %rdx, (%rdi)
+ adcq %rsi, %rax
+ movq %rax, 8(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_add2L
+ .p2align 4, 0x90
+_mcl_fpDbl_add2L: ## @mcl_fpDbl_add2L
+## BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rdx), %r10
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r10
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ adcq %r8, %r9
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r10, %rdx
+ subq (%rcx), %rdx
+ movq %r9, %rsi
+ sbbq 8(%rcx), %rsi
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %r10, %rdx
+ movq %rdx, 16(%rdi)
+ testb %al, %al
+ cmovneq %r9, %rsi
+ movq %rsi, 24(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_sub2L
+ .p2align 4, 0x90
+_mcl_fpDbl_sub2L: ## @mcl_fpDbl_sub2L
+## BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %r11
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %r11
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %r11, (%rdi)
+ movq %rsi, 8(%rdi)
+ sbbq %r8, %r9
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ cmovneq 8(%rcx), %rax
+ addq %r10, %rsi
+ movq %rsi, 16(%rdi)
+ adcq %r9, %rax
+ movq %rax, 24(%rdi)
+ retq
+
+ .globl _mcl_fp_mulUnitPre3L
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre3L: ## @mcl_fp_mulUnitPre3L
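+## 3-limb by single-word multiply: z(%rdi)[0..3] = x(%rsi)[0..2] * y(%rdx)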
+## BB#0:
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r9, %r10
+ movq %r10, 16(%rdi)
+ adcq $0, %r8
+ movq %r8, 24(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_mulPre3L
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre3L: ## @mcl_fpDbl_mulPre3L
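+## 3x3-limb schoolbook multiply: z(%rdi)[0..5] = x(%rsi)[0..2] * y(%rdx)[0..2]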
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r10
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ movq (%r10), %rbx
+ movq %r8, %rax
+ mulq %rbx
+ movq %rdx, %rcx
+ movq 16(%rsi), %r11
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %r14
+ movq %rax, %rsi
+ movq %r9, %rax
+ mulq %rbx
+ movq %rdx, %r15
+ movq %rax, %rbx
+ addq %rcx, %rbx
+ adcq %rsi, %r15
+ adcq $0, %r14
+ movq 8(%r10), %rcx
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %r9, %rax
+ mulq %rcx
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %r8, %rax
+ mulq %rcx
+ addq %rbx, %rax
+ movq %rax, 8(%rdi)
+ adcq %r15, %rsi
+ adcq %r14, %rbp
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq %rdx, %rsi
+ adcq %r13, %rbp
+ adcq %r12, %r14
+ movq 16(%r10), %r15
+ movq %r11, %rax
+ mulq %r15
+ movq %rdx, %r10
+ movq %rax, %rbx
+ movq %r9, %rax
+ mulq %r15
+ movq %rdx, %r9
+ movq %rax, %rcx
+ movq %r8, %rax
+ mulq %r15
+ addq %rsi, %rax
+ movq %rax, 16(%rdi)
+ adcq %rbp, %rcx
+ adcq %r14, %rbx
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rcx
+ movq %rcx, 24(%rdi)
+ adcq %r9, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r10, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sqrPre3L
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre3L: ## @mcl_fpDbl_sqrPre3L
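+## 3-limb squaring: z(%rdi)[0..5] = x(%rsi)[0..2]^2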
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 16(%rsi), %r10
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %rbx
+ movq %rax, (%rdi)
+ movq %r10, %rax
+ mulq %rcx
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq %rcx
+ movq %rdx, %r14
+ movq %rax, %r12
+ addq %r12, %rbx
+ movq %r14, %r13
+ adcq %r11, %r13
+ movq %r8, %rcx
+ adcq $0, %rcx
+ movq %r10, %rax
+ mulq %rsi
+ movq %rdx, %r9
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq %rsi
+ movq %rax, %rsi
+ addq %r12, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r13, %rsi
+ adcq %r15, %rcx
+ sbbq %rbx, %rbx
+ andl $1, %ebx
+ addq %r14, %rsi
+ adcq %rdx, %rcx
+ adcq %r9, %rbx
+ movq %r10, %rax
+ mulq %r10
+ addq %r11, %rsi
+ movq %rsi, 16(%rdi)
+ adcq %r15, %rcx
+ adcq %rbx, %rax
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq %r8, %rcx
+ movq %rcx, 24(%rdi)
+ adcq %r9, %rax
+ movq %rax, 32(%rdi)
+ adcq %rdx, %rsi
+ movq %rsi, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_mont3L
+ .p2align 4, 0x90
+_mcl_fp_mont3L: ## @mcl_fp_mont3L
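+## 3-limb Montgomery multiplication: z(%rdi) = x(%rsi) * y(%rdx) * 2^-192 mod p(%rcx); -p^-1 mod 2^64 is read from -8(%rcx)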
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r10
+ movq (%rdx), %rdi
+ movq %rdx, %r11
+ movq %r11, -16(%rsp) ## 8-byte Spill
+ movq %r10, %rax
+ movq %r10, -24(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %rbx
+ movq %rdx, %r15
+ movq (%rsi), %rbp
+ movq %rbp, -64(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, %r12
+ movq %rax, %rsi
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, %r13
+ addq %rsi, %r13
+ adcq %rbx, %r12
+ adcq $0, %r15
+ movq -8(%rcx), %r14
+ movq %r8, %rbp
+ imulq %r14, %rbp
+ movq 16(%rcx), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r9
+ movq %rdx, %rbx
+ movq (%rcx), %rdi
+ movq %rdi, -40(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -48(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, %rsi
+ movq %rax, %rcx
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rdx, %rbp
+ addq %rcx, %rbp
+ adcq %r9, %rsi
+ adcq $0, %rbx
+ addq %r8, %rax
+ adcq %r13, %rbp
+ movq 8(%r11), %rcx
+ adcq %r12, %rsi
+ adcq %r15, %rbx
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ movq %rcx, %rax
+ mulq %r10
+ movq %rdx, %r15
+ movq %rax, %r8
+ movq %rcx, %rax
+ movq -32(%rsp), %r10 ## 8-byte Reload
+ mulq %r10
+ movq %rdx, %r12
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r13
+ movq %rdx, %rcx
+ addq %r9, %rcx
+ adcq %r8, %r12
+ adcq $0, %r15
+ addq %rbp, %r13
+ adcq %rsi, %rcx
+ adcq %rbx, %r12
+ adcq %rdi, %r15
+ sbbq %r11, %r11
+ andl $1, %r11d
+ movq %r13, %rdi
+ imulq %r14, %rdi
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %rdi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ addq %r9, %rbp
+ adcq %r8, %rsi
+ adcq $0, %rbx
+ addq %r13, %rax
+ adcq %rcx, %rbp
+ adcq %r12, %rsi
+ adcq %r15, %rbx
+ adcq $0, %r11
+ movq -16(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rcx
+ movq %rcx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq %r10
+ movq %rdx, %r10
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rcx
+ addq %rdi, %rcx
+ adcq %r15, %r10
+ adcq $0, %r8
+ addq %rbp, %r9
+ adcq %rsi, %rcx
+ adcq %rbx, %r10
+ adcq %r11, %r8
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ imulq %r9, %r14
+ movq %r14, %rax
+ movq -56(%rsp), %r15 ## 8-byte Reload
+ mulq %r15
+ movq %rdx, %rbx
+ movq %rax, %r11
+ movq %r14, %rax
+ movq -48(%rsp), %r12 ## 8-byte Reload
+ mulq %r12
+ movq %rdx, %rsi
+ movq %rax, %r13
+ movq %r14, %rax
+ movq -40(%rsp), %rbp ## 8-byte Reload
+ mulq %rbp
+ addq %r13, %rdx
+ adcq %r11, %rsi
+ adcq $0, %rbx
+ addq %r9, %rax
+ adcq %rcx, %rdx
+ adcq %r10, %rsi
+ adcq %r8, %rbx
+ adcq $0, %rdi
+ movq %rdx, %rax
+ subq %rbp, %rax
+ movq %rsi, %rcx
+ sbbq %r12, %rcx
+ movq %rbx, %rbp
+ sbbq %r15, %rbp
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %rbx, %rbp
+ testb %dil, %dil
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rsi, %rcx
+ movq %rcx, 8(%rdx)
+ movq %rbp, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF3L
+ .p2align 4, 0x90
+_mcl_fp_montNF3L: ## @mcl_fp_montNF3L
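+## 3-limb Montgomery multiplication, branchless final step: reduced result selected by sign via cmovs instead of a carry test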
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r10
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r11
+ movq (%r10), %rbp
+ movq %r10, -16(%rsp) ## 8-byte Spill
+ movq %r11, %rax
+ movq %r11, -24(%rsp) ## 8-byte Spill
+ mulq %rbp
+ movq %rax, %r14
+ movq %rdx, %r15
+ movq (%rsi), %rbx
+ movq %rbx, -48(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulq %rbp
+ movq %rdx, %rdi
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rax, %r13
+ movq %rdx, %rbp
+ addq %r8, %rbp
+ adcq %r14, %rdi
+ adcq $0, %r15
+ movq -8(%rcx), %r14
+ movq %r13, %rbx
+ imulq %r14, %rbx
+ movq 16(%rcx), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r12
+ movq %rdx, %r8
+ movq (%rcx), %rsi
+ movq %rsi, -32(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -40(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rdx, %r9
+ movq %rax, %rcx
+ movq %rbx, %rax
+ mulq %rsi
+ addq %r13, %rax
+ adcq %rbp, %rcx
+ adcq %rdi, %r12
+ adcq $0, %r15
+ addq %rdx, %rcx
+ movq 8(%r10), %rbp
+ adcq %r9, %r12
+ adcq %r8, %r15
+ movq %rbp, %rax
+ mulq %r11
+ movq %rdx, %rsi
+ movq %rax, %r8
+ movq %rbp, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r9
+ movq %rbp, %rax
+ movq -48(%rsp), %r10 ## 8-byte Reload
+ mulq %r10
+ movq %rax, %r13
+ movq %rdx, %rbp
+ addq %r9, %rbp
+ adcq %r8, %rbx
+ adcq $0, %rsi
+ addq %rcx, %r13
+ adcq %r12, %rbp
+ adcq %r15, %rbx
+ adcq $0, %rsi
+ movq %r13, %rcx
+ imulq %r14, %rcx
+ movq %rcx, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r15
+ movq %rcx, %rax
+ movq -40(%rsp), %rdi ## 8-byte Reload
+ mulq %rdi
+ movq %rdx, %r9
+ movq %rax, %r12
+ movq %rcx, %rax
+ movq -32(%rsp), %r11 ## 8-byte Reload
+ mulq %r11
+ addq %r13, %rax
+ adcq %rbp, %r12
+ adcq %rbx, %r15
+ adcq $0, %rsi
+ addq %rdx, %r12
+ adcq %r9, %r15
+ adcq %r8, %rsi
+ movq -16(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rbx
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq %r10
+ movq %rax, %r10
+ movq %rdx, %rbx
+ addq %r9, %rbx
+ adcq %r8, %rcx
+ adcq $0, %rbp
+ addq %r12, %r10
+ adcq %r15, %rbx
+ adcq %rsi, %rcx
+ adcq $0, %rbp
+ imulq %r10, %r14
+ movq %r14, %rax
+ movq -56(%rsp), %r15 ## 8-byte Reload
+ mulq %r15
+ movq %rdx, %r8
+ movq %rax, %rsi
+ movq %r14, %rax
+ movq %rdi, %r12
+ mulq %r12
+ movq %rdx, %r9
+ movq %rax, %rdi
+ movq %r14, %rax
+ mulq %r11
+ addq %r10, %rax
+ adcq %rbx, %rdi
+ adcq %rcx, %rsi
+ adcq $0, %rbp
+ addq %rdx, %rdi
+ adcq %r9, %rsi
+ adcq %r8, %rbp
+ movq %rdi, %rax
+ subq %r11, %rax
+ movq %rsi, %rcx
+ sbbq %r12, %rcx
+ movq %rbp, %rbx
+ sbbq %r15, %rbx
+ movq %rbx, %rdx
+ sarq $63, %rdx
+ cmovsq %rdi, %rax
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ movq %rax, (%rdx)
+ cmovsq %rsi, %rcx
+ movq %rcx, 8(%rdx)
+ cmovsq %rbp, %rbx
+ movq %rbx, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed3L
+ .p2align 4, 0x90
+_mcl_fp_montRed3L: ## @mcl_fp_montRed3L
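+## 3-limb Montgomery reduction: z(%rdi) = t(%rsi)[0..5] * 2^-192 mod p(%rdx); -p^-1 mod 2^64 is read from -8(%rdx)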
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %r9
+ movq (%rcx), %rdi
+ movq (%rsi), %r15
+ movq %r15, %rbx
+ imulq %r9, %rbx
+ movq 16(%rcx), %rbp
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rbp, -24(%rsp) ## 8-byte Spill
+ movq %rax, %r11
+ movq %rdx, %r8
+ movq 8(%rcx), %rcx
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rcx, %r12
+ movq %r12, -32(%rsp) ## 8-byte Spill
+ movq %rdx, %r10
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdi, %rbx
+ movq %rbx, -16(%rsp) ## 8-byte Spill
+ movq %rdx, %rcx
+ addq %r14, %rcx
+ adcq %r11, %r10
+ adcq $0, %r8
+ movq 40(%rsi), %rdi
+ movq 32(%rsi), %r13
+ addq %r15, %rax
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r10
+ adcq 24(%rsi), %r8
+ adcq $0, %r13
+ adcq $0, %rdi
+ sbbq %r15, %r15
+ andl $1, %r15d
+ movq %rcx, %rsi
+ imulq %r9, %rsi
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %r11
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq %r12
+ movq %rdx, %r14
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, %rbx
+ addq %r12, %rbx
+ adcq %rbp, %r14
+ adcq $0, %r11
+ addq %rcx, %rax
+ adcq %r10, %rbx
+ adcq %r8, %r14
+ adcq %r13, %r11
+ adcq $0, %rdi
+ adcq $0, %r15
+ imulq %rbx, %r9
+ movq %r9, %rax
+ movq -24(%rsp), %r12 ## 8-byte Reload
+ mulq %r12
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %r9, %rax
+ movq -32(%rsp), %r13 ## 8-byte Reload
+ mulq %r13
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %r9, %rax
+ movq -16(%rsp), %rcx ## 8-byte Reload
+ mulq %rcx
+ addq %r10, %rdx
+ adcq %r8, %rsi
+ adcq $0, %rbp
+ addq %rbx, %rax
+ adcq %r14, %rdx
+ adcq %r11, %rsi
+ adcq %rdi, %rbp
+ adcq $0, %r15
+ movq %rdx, %rax
+ subq %rcx, %rax
+ movq %rsi, %rdi
+ sbbq %r13, %rdi
+ movq %rbp, %rcx
+ sbbq %r12, %rcx
+ sbbq $0, %r15
+ andl $1, %r15d
+ cmovneq %rbp, %rcx
+ testb %r15b, %r15b
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rsi, %rdi
+ movq %rdi, 8(%rdx)
+ movq %rcx, 16(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre3L
+ .p2align 4, 0x90
+_mcl_fp_addPre3L: ## @mcl_fp_addPre3L
+## BB#0:
+ movq 16(%rdx), %rax
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rax
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_subPre3L
+ .p2align 4, 0x90
+_mcl_fp_subPre3L: ## @mcl_fp_subPre3L
+## BB#0:
+ movq 16(%rsi), %r8
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r8
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_3L
+ .p2align 4, 0x90
+_mcl_fp_shr1_3L: ## @mcl_fp_shr1_3L
+## BB#0:
+ movq 16(%rsi), %rax
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rdx
+ shrdq $1, %rdx, %rcx
+ movq %rcx, (%rdi)
+ shrdq $1, %rax, %rdx
+ movq %rdx, 8(%rdi)
+ shrq %rax
+ movq %rax, 16(%rdi)
+ retq
+
+ .globl _mcl_fp_add3L
+ .p2align 4, 0x90
+_mcl_fp_add3L: ## @mcl_fp_add3L
+## BB#0:
+ movq 16(%rdx), %r8
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r8
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB44_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r8, 16(%rdi)
+LBB44_2: ## %carry
+ retq
+
+ .globl _mcl_fp_addNF3L
+ .p2align 4, 0x90
+_mcl_fp_addNF3L: ## @mcl_fp_addNF3L
+## BB#0:
+ movq 16(%rdx), %r8
+ movq (%rdx), %r10
+ movq 8(%rdx), %r9
+ addq (%rsi), %r10
+ adcq 8(%rsi), %r9
+ adcq 16(%rsi), %r8
+ movq %r10, %rsi
+ subq (%rcx), %rsi
+ movq %r9, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r8, %rax
+ sbbq 16(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r10, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 16(%rdi)
+ retq
+
+ .globl _mcl_fp_sub3L
+ .p2align 4, 0x90
+_mcl_fp_sub3L: ## @mcl_fp_sub3L
+## BB#0:
+ movq 16(%rsi), %r8
+ movq (%rsi), %rax
+ movq 8(%rsi), %r9
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r9
+ sbbq 16(%rdx), %r8
+ movq %rax, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r8, 16(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB46_2
+## BB#1: ## %nocarry
+ retq
+LBB46_2: ## %carry
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rsi
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r8, %rsi
+ movq %rsi, 16(%rdi)
+ retq
+
+ .globl _mcl_fp_subNF3L
+ .p2align 4, 0x90
+_mcl_fp_subNF3L: ## @mcl_fp_subNF3L
+## BB#0:
+ movq 16(%rsi), %r10
+ movq (%rsi), %r8
+ movq 8(%rsi), %r9
+ subq (%rdx), %r8
+ sbbq 8(%rdx), %r9
+ sbbq 16(%rdx), %r10
+ movq %r10, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rsi
+ shldq $1, %r10, %rsi
+ andq (%rcx), %rsi
+ movq 16(%rcx), %rax
+ andq %rdx, %rax
+ andq 8(%rcx), %rdx
+ addq %r8, %rsi
+ movq %rsi, (%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r10, %rax
+ movq %rax, 16(%rdi)
+ retq
+
+ .globl _mcl_fpDbl_add3L
+ .p2align 4, 0x90
+_mcl_fpDbl_add3L: ## @mcl_fpDbl_add3L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r10
+ movq 40(%rsi), %r8
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r9
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r14, %r15
+ adcq %r11, %r9
+ adcq %r10, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %r15, %rdx
+ subq (%rcx), %rdx
+ movq %r9, %rsi
+ sbbq 8(%rcx), %rsi
+ movq %r8, %rbx
+ sbbq 16(%rcx), %rbx
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %r15, %rdx
+ movq %rdx, 24(%rdi)
+ testb %al, %al
+ cmovneq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ cmovneq %r8, %rbx
+ movq %rbx, 40(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fpDbl_sub3L
+ .p2align 4, 0x90
+_mcl_fpDbl_sub3L: ## @mcl_fpDbl_sub3L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r10
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rax
+ xorl %esi, %esi
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rax
+ movq 24(%rdx), %r15
+ movq 32(%rdx), %r12
+ sbbq 16(%rdx), %r14
+ movq %rbx, (%rdi)
+ movq %rax, 8(%rdi)
+ movq %r14, 16(%rdi)
+ sbbq %r15, %r11
+ sbbq %r12, %r9
+ sbbq %r10, %r8
+ movl $0, %eax
+ sbbq $0, %rax
+ andl $1, %eax
+ movq (%rcx), %rdx
+ cmoveq %rsi, %rdx
+ testb %al, %al
+ movq 16(%rcx), %rax
+ cmoveq %rsi, %rax
+ cmovneq 8(%rcx), %rsi
+ addq %r11, %rdx
+ movq %rdx, 24(%rdi)
+ adcq %r9, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r8, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_mulUnitPre4L
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre4L: ## @mcl_fp_mulUnitPre4L
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r14, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r11, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r9, %r10
+ movq %r10, 24(%rdi)
+ adcq $0, %r8
+ movq %r8, 32(%rdi)
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fpDbl_mulPre4L
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre4L: ## @mcl_fpDbl_mulPre4L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq (%rsi), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %r8
+ movq %r8, -56(%rsp) ## 8-byte Spill
+ movq (%rdx), %rbx
+ movq %rdx, %rbp
+ mulq %rbx
+ movq %rdx, %r15
+ movq 16(%rsi), %rcx
+ movq 24(%rsi), %r11
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rbx
+ movq %rdx, %r12
+ movq %rax, %r14
+ movq %rcx, %rax
+ movq %rcx, -16(%rsp) ## 8-byte Spill
+ mulq %rbx
+ movq %rdx, %r10
+ movq %rax, %r9
+ movq %r8, %rax
+ mulq %rbx
+ movq %rdx, %r13
+ movq %rax, %r8
+ addq %r15, %r8
+ adcq %r9, %r13
+ adcq %r14, %r10
+ adcq $0, %r12
+ movq %rbp, %r9
+ movq %r9, -8(%rsp) ## 8-byte Spill
+ movq 8(%r9), %rbp
+ movq %r11, %rax
+ mulq %rbp
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq %rbp
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq %rax, %rcx
+ movq -56(%rsp), %r14 ## 8-byte Reload
+ movq %r14, %rax
+ mulq %rbp
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rax, %rbx
+ movq -64(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ addq %r8, %rax
+ movq %rax, 8(%rdi)
+ adcq %r13, %rbx
+ adcq %r10, %rcx
+ adcq %r12, %r15
+ sbbq %r13, %r13
+ movq 16(%r9), %rbp
+ movq %r14, %rax
+ mulq %rbp
+ movq %rax, %r12
+ movq %rdx, %r14
+ andl $1, %r13d
+ addq -48(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -40(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -32(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -24(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r11, %rax
+ mulq %rbp
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq -16(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, %r9
+ movq %rax, %r10
+ movq -64(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ addq %rbx, %rax
+ movq %rax, 16(%rdi)
+ adcq %r12, %rcx
+ adcq %r15, %r10
+ adcq %r13, %r11
+ sbbq %r13, %r13
+ andl $1, %r13d
+ addq %rdx, %rcx
+ adcq %r14, %r10
+ adcq %r9, %r11
+ adcq %r8, %r13
+ movq -8(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rbx
+ movq %rbx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r9
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r15
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq (%rsi)
+ addq %rcx, %rax
+ movq %rax, 24(%rdi)
+ adcq %r10, %rbp
+ adcq %r11, %r12
+ adcq %r13, %r14
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rbp
+ movq %rbp, 32(%rdi)
+ adcq %r15, %r12
+ movq %r12, 40(%rdi)
+ adcq %r9, %r14
+ movq %r14, 48(%rdi)
+ adcq %r8, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sqrPre4L
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre4L: ## @mcl_fpDbl_sqrPre4L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rsi, %r10
+ movq 16(%r10), %r9
+ movq 24(%r10), %r11
+ movq (%r10), %r15
+ movq 8(%r10), %r8
+ movq %r15, %rax
+ mulq %r15
+ movq %rdx, %rbp
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %r8
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %r9, %rax
+ mulq %r8
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq %r11, %rax
+ mulq %r15
+ movq %rdx, %rbx
+ movq %rax, %rcx
+ movq %r9, %rax
+ mulq %r15
+ movq %rdx, %rsi
+ movq %rsi, -16(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %r8, %rax
+ mulq %r8
+ movq %rdx, %r13
+ movq %rax, %r14
+ movq %r8, %rax
+ mulq %r15
+ addq %rax, %rbp
+ movq %rdx, %r8
+ adcq %r12, %r8
+ adcq %rsi, %rcx
+ adcq $0, %rbx
+ addq %rax, %rbp
+ movq %rbp, 8(%rdi)
+ adcq %r14, %r8
+ movq -40(%rsp), %rsi ## 8-byte Reload
+ adcq %rsi, %rcx
+ adcq -32(%rsp), %rbx ## 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq %rdx, %r8
+ adcq %r13, %rcx
+ movq -24(%rsp), %r15 ## 8-byte Reload
+ adcq %r15, %rbx
+ adcq -8(%rsp), %rbp ## 8-byte Folded Reload
+ movq %r11, %rax
+ mulq %r9
+ movq %rdx, %r14
+ movq %rax, %r11
+ movq %r9, %rax
+ mulq %r9
+ movq %rax, %r9
+ addq %r12, %r8
+ movq %r8, 16(%rdi)
+ adcq %rsi, %rcx
+ adcq %rbx, %r9
+ adcq %rbp, %r11
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -16(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r15, %r9
+ adcq %rdx, %r11
+ adcq %r14, %r12
+ movq 24(%r10), %rbp
+ movq %rbp, %rax
+ mulq 16(%r10)
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq 8(%r10)
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq (%r10)
+ movq %rdx, %r15
+ movq %rax, %rsi
+ movq %rbp, %rax
+ mulq %rbp
+ addq %rcx, %rsi
+ movq %rsi, 24(%rdi)
+ adcq %r9, %rbx
+ adcq %r11, %r14
+ adcq %r12, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r15, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r13, %r14
+ movq %r14, 40(%rdi)
+ adcq %r8, %rax
+ movq %rax, 48(%rdi)
+ adcq %rdx, %rcx
+ movq %rcx, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mont4L
+ .p2align 4, 0x90
+_mcl_fp_mont4L: ## @mcl_fp_mont4L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 24(%rsi), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq (%rdx), %rbp
+ mulq %rbp
+ movq %rax, %r9
+ movq %rdx, %r8
+ movq 16(%rsi), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ mulq %rbp
+ movq %rax, %rbx
+ movq %rdx, %r11
+ movq (%rsi), %rdi
+ movq %rdi, -56(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulq %rbp
+ movq %rdx, %r12
+ movq %rax, %rsi
+ movq %rdi, %rax
+ mulq %rbp
+ movq %rax, %r13
+ movq %rdx, %r15
+ addq %rsi, %r15
+ adcq %rbx, %r12
+ adcq %r9, %r11
+ adcq $0, %r8
+ movq -8(%rcx), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %r13, %rsi
+ imulq %rax, %rsi
+ movq 24(%rcx), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rax, %r10
+ movq %rdx, %r9
+ movq 16(%rcx), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rax, %r14
+ movq %rdx, %rbx
+ movq (%rcx), %rbp
+ movq %rbp, -24(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -32(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rcx
+ movq %rdx, %rdi
+ movq %rax, %rcx
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %rsi
+ addq %rcx, %rsi
+ adcq %r14, %rdi
+ adcq %r10, %rbx
+ adcq $0, %r9
+ addq %r13, %rax
+ adcq %r15, %rsi
+ adcq %r12, %rdi
+ adcq %r11, %rbx
+ adcq %r8, %r9
+ sbbq %r15, %r15
+ andl $1, %r15d
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rbp
+ movq %rbp, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r10
+ movq %rbp, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r11
+ movq %rbp, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %rbp
+ addq %r14, %rbp
+ adcq %r11, %rcx
+ adcq %r10, %r13
+ adcq $0, %r12
+ addq %rsi, %r8
+ adcq %rdi, %rbp
+ adcq %rbx, %rcx
+ adcq %r9, %r13
+ adcq %r15, %r12
+ sbbq %r15, %r15
+ andl $1, %r15d
+ movq %r8, %rsi
+ imulq -88(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r14
+ movq %rsi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ addq %r14, %rsi
+ adcq %r11, %rdi
+ adcq %r10, %rbx
+ adcq $0, %r9
+ addq %r8, %rax
+ adcq %rbp, %rsi
+ adcq %rcx, %rdi
+ adcq %r13, %rbx
+ adcq %r12, %r9
+ adcq $0, %r15
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rbp
+ movq %rbp, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r10
+ movq %rbp, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r11
+ movq %rbp, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rax, %rbp
+ movq %rdx, %r8
+ addq %r14, %r8
+ adcq %r11, %rcx
+ adcq %r10, %r13
+ adcq $0, %r12
+ addq %rsi, %rbp
+ adcq %rdi, %r8
+ adcq %rbx, %rcx
+ adcq %r9, %r13
+ adcq %r15, %r12
+ sbbq %r14, %r14
+ movq %rbp, %rsi
+ imulq -88(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ andl $1, %r14d
+ addq %r15, %r11
+ adcq %r10, %r9
+ adcq -16(%rsp), %rbx ## 8-byte Folded Reload
+ adcq $0, %rdi
+ addq %rbp, %rax
+ adcq %r8, %r11
+ adcq %rcx, %r9
+ adcq %r13, %rbx
+ adcq %r12, %rdi
+ adcq $0, %r14
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rcx
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r13
+ movq %rcx, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %rbp
+ addq %r13, %rbp
+ adcq %r15, %rsi
+ adcq -96(%rsp), %r12 ## 8-byte Folded Reload
+ adcq $0, %r8
+ addq %r11, %r10
+ adcq %r9, %rbp
+ adcq %rbx, %rsi
+ adcq %rdi, %r12
+ adcq %r14, %r8
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ movq -88(%rsp), %rcx ## 8-byte Reload
+ imulq %r10, %rcx
+ movq %rcx, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %rbx
+ movq %rcx, %rax
+ movq %rcx, %r9
+ movq -32(%rsp), %r11 ## 8-byte Reload
+ mulq %r11
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %r9, %rax
+ movq -24(%rsp), %r9 ## 8-byte Reload
+ mulq %r9
+ addq %r14, %rdx
+ adcq %rbx, %rcx
+ adcq -88(%rsp), %r15 ## 8-byte Folded Reload
+ adcq $0, %r13
+ addq %r10, %rax
+ adcq %rbp, %rdx
+ adcq %rsi, %rcx
+ adcq %r12, %r15
+ adcq %r8, %r13
+ adcq $0, %rdi
+ movq %rdx, %rax
+ subq %r9, %rax
+ movq %rcx, %rsi
+ sbbq %r11, %rsi
+ movq %r15, %rbp
+ sbbq -80(%rsp), %rbp ## 8-byte Folded Reload
+ movq %r13, %rbx
+ sbbq -72(%rsp), %rbx ## 8-byte Folded Reload
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %r13, %rbx
+ testb %dil, %dil
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rcx, %rsi
+ movq %rsi, 8(%rdx)
+ cmovneq %r15, %rbp
+ movq %rbp, 16(%rdx)
+ movq %rbx, 24(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF4L
+ .p2align 4, 0x90
+_mcl_fp_montNF4L: ## @mcl_fp_montNF4L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %r15
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 24(%rsi), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq (%r15), %rdi
+ movq %r15, -24(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, %r12
+ movq 16(%rsi), %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r14
+ movq %rdx, %r10
+ movq (%rsi), %rbp
+ movq %rbp, -56(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, %rbx
+ movq %rax, %rsi
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, %r11
+ movq %rdx, %r9
+ addq %rsi, %r9
+ adcq %r14, %rbx
+ adcq %r8, %r10
+ adcq $0, %r12
+ movq -8(%rcx), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %r11, %rsi
+ imulq %rax, %rsi
+ movq 24(%rcx), %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rax, %r13
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rdx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rax, %r8
+ movq %rdx, %r14
+ movq (%rcx), %rdi
+ movq %rdi, -72(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -32(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rcx
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq %rdi
+ addq %r11, %rax
+ adcq %r9, %rbp
+ adcq %rbx, %r8
+ adcq %r10, %r13
+ adcq $0, %r12
+ addq %rdx, %rbp
+ adcq %rcx, %r8
+ adcq %r14, %r13
+ adcq -16(%rsp), %r12 ## 8-byte Folded Reload
+ movq 8(%r15), %rdi
+ movq %rdi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %rsi
+ movq %rdi, %rax
+ mulq -96(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rax, %rdi
+ movq %rdx, %r9
+ addq %r14, %r9
+ adcq %r11, %rcx
+ adcq %rsi, %r10
+ adcq $0, %rbx
+ addq %rbp, %rdi
+ adcq %r8, %r9
+ adcq %r13, %rcx
+ adcq %r12, %r10
+ adcq $0, %rbx
+ movq %rdi, %rsi
+ imulq -80(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r13
+ movq %rsi, %rax
+ movq -32(%rsp), %r15 ## 8-byte Reload
+ mulq %r15
+ movq %rdx, %r14
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ addq %rdi, %rax
+ adcq %r9, %rbp
+ adcq %rcx, %r13
+ adcq %r10, %r12
+ adcq $0, %rbx
+ addq %rdx, %rbp
+ adcq %r14, %r13
+ adcq %r11, %r12
+ adcq %r8, %rbx
+ movq -24(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdi
+ movq %rdi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -96(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rdi
+ addq %r14, %rdi
+ adcq %r11, %rcx
+ adcq %r10, %r8
+ adcq $0, %rsi
+ addq %rbp, %r9
+ adcq %r13, %rdi
+ adcq %r12, %rcx
+ adcq %rbx, %r8
+ adcq $0, %rsi
+ movq %r9, %rbx
+ imulq -80(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq %r15
+ movq %rdx, %r14
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ addq %r9, %rax
+ adcq %rdi, %rbp
+ adcq %rcx, %r13
+ adcq %r8, %r12
+ adcq $0, %rsi
+ addq %rdx, %rbp
+ adcq %r14, %r13
+ adcq %r11, %r12
+ adcq %r10, %rsi
+ movq -24(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdi
+ movq %rdi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %rcx
+ movq %rdi, %rax
+ mulq -96(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %rdi
+ addq %r14, %rdi
+ adcq %r11, %r10
+ adcq %rcx, %r8
+ adcq $0, %rbx
+ addq %rbp, %r9
+ adcq %r13, %rdi
+ adcq %r12, %r10
+ adcq %rsi, %r8
+ adcq $0, %rbx
+ movq -80(%rsp), %rcx ## 8-byte Reload
+ imulq %r9, %rcx
+ movq %rcx, %rax
+ movq -40(%rsp), %r12 ## 8-byte Reload
+ mulq %r12
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %rcx, %rax
+ movq -48(%rsp), %r11 ## 8-byte Reload
+ mulq %r11
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ movq %rcx, %rax
+ movq %rcx, %r15
+ movq -72(%rsp), %rsi ## 8-byte Reload
+ mulq %rsi
+ movq %rdx, %r14
+ movq %rax, %rcx
+ movq %r15, %rax
+ movq -32(%rsp), %r15 ## 8-byte Reload
+ mulq %r15
+ addq %r9, %rcx
+ adcq %rdi, %rax
+ adcq %r10, %rbp
+ adcq %r8, %r13
+ adcq $0, %rbx
+ addq %r14, %rax
+ adcq %rdx, %rbp
+ adcq -96(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -88(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rax, %rcx
+ subq %rsi, %rcx
+ movq %rbp, %rdx
+ sbbq %r15, %rdx
+ movq %r13, %rdi
+ sbbq %r11, %rdi
+ movq %rbx, %rsi
+ sbbq %r12, %rsi
+ cmovsq %rax, %rcx
+ movq -8(%rsp), %rax ## 8-byte Reload
+ movq %rcx, (%rax)
+ cmovsq %rbp, %rdx
+ movq %rdx, 8(%rax)
+ cmovsq %r13, %rdi
+ movq %rdi, 16(%rax)
+ cmovsq %rbx, %rsi
+ movq %rsi, 24(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed4L
+ .p2align 4, 0x90
+_mcl_fp_montRed4L: ## @mcl_fp_montRed4L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq (%rcx), %rdi
+ movq %rdi, -32(%rsp) ## 8-byte Spill
+ movq (%rsi), %r12
+ movq %r12, %rbx
+ imulq %rax, %rbx
+ movq %rax, %r9
+ movq %r9, -64(%rsp) ## 8-byte Spill
+ movq 24(%rcx), %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r11
+ movq %rdx, %r8
+ movq 16(%rcx), %rbp
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rbp, %r13
+ movq %r13, -24(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %rdx, %r10
+ movq 8(%rcx), %rcx
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rcx, %rbp
+ movq %rbp, -16(%rsp) ## 8-byte Spill
+ movq %rdx, %r15
+ movq %rax, %rcx
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdx, %rbx
+ addq %rcx, %rbx
+ adcq %r14, %r15
+ adcq %r11, %r10
+ adcq $0, %r8
+ movq 56(%rsi), %rcx
+ movq 48(%rsi), %rdx
+ addq %r12, %rax
+ movq 40(%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %r10
+ adcq 32(%rsi), %r8
+ adcq $0, %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, %r12
+ adcq $0, %rcx
+ movq %rcx, -72(%rsp) ## 8-byte Spill
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ movq %rbx, %rsi
+ imulq %r9, %rsi
+ movq %rsi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %r13
+ movq %rdx, %r14
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ movq %rsi, %rax
+ movq -32(%rsp), %r13 ## 8-byte Reload
+ mulq %r13
+ movq %rdx, %rsi
+ addq %rbp, %rsi
+ adcq %r9, %rcx
+ adcq -56(%rsp), %r14 ## 8-byte Folded Reload
+ adcq $0, %r11
+ addq %rbx, %rax
+ adcq %r15, %rsi
+ adcq %r10, %rcx
+ adcq %r8, %r14
+ adcq -48(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %r12
+ movq %r12, -48(%rsp) ## 8-byte Spill
+ movq -72(%rsp), %rbp ## 8-byte Reload
+ adcq $0, %rbp
+ adcq $0, %rdi
+ movq %rsi, %rbx
+ imulq -64(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ movq -40(%rsp), %r12 ## 8-byte Reload
+ mulq %r12
+ movq %rdx, %r8
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq %r13
+ movq %rdx, %rbx
+ addq %r9, %rbx
+ adcq -56(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -72(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %r8
+ addq %rsi, %rax
+ adcq %rcx, %rbx
+ adcq %r14, %r15
+ adcq %r11, %r10
+ adcq -48(%rsp), %r8 ## 8-byte Folded Reload
+ adcq $0, %rbp
+ movq %rbp, -72(%rsp) ## 8-byte Spill
+ adcq $0, %rdi
+ movq -64(%rsp), %rcx ## 8-byte Reload
+ imulq %rbx, %rcx
+ movq %rcx, %rax
+ mulq %r12
+ movq %rdx, %r13
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ movq -24(%rsp), %r14 ## 8-byte Reload
+ mulq %r14
+ movq %rdx, %r11
+ movq %rax, %r12
+ movq %rcx, %rax
+ movq %rcx, %r9
+ movq -16(%rsp), %rsi ## 8-byte Reload
+ mulq %rsi
+ movq %rdx, %rbp
+ movq %rax, %rcx
+ movq %r9, %rax
+ movq -32(%rsp), %r9 ## 8-byte Reload
+ mulq %r9
+ addq %rcx, %rdx
+ adcq %r12, %rbp
+ adcq -64(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbx, %rax
+ adcq %r15, %rdx
+ adcq %r10, %rbp
+ adcq %r8, %r11
+ adcq -72(%rsp), %r13 ## 8-byte Folded Reload
+ adcq $0, %rdi
+ movq %rdx, %rax
+ subq %r9, %rax
+ movq %rbp, %rcx
+ sbbq %rsi, %rcx
+ movq %r11, %rbx
+ sbbq %r14, %rbx
+ movq %r13, %rsi
+ sbbq -40(%rsp), %rsi ## 8-byte Folded Reload
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %r13, %rsi
+ testb %dil, %dil
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rbp, %rcx
+ movq %rcx, 8(%rdx)
+ cmovneq %r11, %rbx
+ movq %rbx, 16(%rdx)
+ movq %rsi, 24(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre4L
+ .p2align 4, 0x90
+_mcl_fp_addPre4L: ## @mcl_fp_addPre4L
+## BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rdx), %rax
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rax
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ adcq %r8, %r9
+ movq %r9, 24(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_subPre4L
+ .p2align 4, 0x90
+_mcl_fp_subPre4L: ## @mcl_fp_subPre4L
+## BB#0:
+ movq 24(%rdx), %r8
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %rcx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r10, 16(%rdi)
+ sbbq %r8, %r9
+ movq %r9, 24(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_4L
+ .p2align 4, 0x90
+_mcl_fp_shr1_4L: ## @mcl_fp_shr1_4L
+## BB#0:
+ movq 24(%rsi), %rax
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rdx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rdx
+ movq %rdx, (%rdi)
+ shrdq $1, %rcx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rax, %rcx
+ movq %rcx, 16(%rdi)
+ shrq %rax
+ movq %rax, 24(%rdi)
+ retq
+
+ .globl _mcl_fp_add4L
+ .p2align 4, 0x90
+_mcl_fp_add4L: ## @mcl_fp_add4L
+## BB#0:
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %r8
+ movq 16(%rdx), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r9
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r9, 16(%rdi)
+ adcq %r10, %r8
+ movq %r8, 24(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r9
+ sbbq 24(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB59_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r9, 16(%rdi)
+ movq %r8, 24(%rdi)
+LBB59_2: ## %carry
+ retq
+
+ .globl _mcl_fp_addNF4L
+ .p2align 4, 0x90
+_mcl_fp_addNF4L: ## @mcl_fp_addNF4L
+## BB#0:
+ pushq %rbx
+ movq 24(%rdx), %r8
+ movq 16(%rdx), %r9
+ movq (%rdx), %r11
+ movq 8(%rdx), %r10
+ addq (%rsi), %r11
+ adcq 8(%rsi), %r10
+ adcq 16(%rsi), %r9
+ adcq 24(%rsi), %r8
+ movq %r11, %rsi
+ subq (%rcx), %rsi
+ movq %r10, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r9, %rax
+ sbbq 16(%rcx), %rax
+ movq %r8, %rbx
+ sbbq 24(%rcx), %rbx
+ testq %rbx, %rbx
+ cmovsq %r11, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r10, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r9, %rax
+ movq %rax, 16(%rdi)
+ cmovsq %r8, %rbx
+ movq %rbx, 24(%rdi)
+ popq %rbx
+ retq
+
+ .globl _mcl_fp_sub4L
+ .p2align 4, 0x90
+_mcl_fp_sub4L: ## @mcl_fp_sub4L
+## BB#0:
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %r8
+ movq 16(%rsi), %r9
+ movq (%rsi), %rax
+ movq 8(%rsi), %r11
+ xorl %esi, %esi
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r9
+ movq %rax, (%rdi)
+ movq %r11, 8(%rdi)
+ movq %r9, 16(%rdi)
+ sbbq %r10, %r8
+ movq %r8, 24(%rdi)
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB61_2
+## BB#1: ## %nocarry
+ retq
+LBB61_2: ## %carry
+ movq 24(%rcx), %r10
+ movq 8(%rcx), %rsi
+ movq 16(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r8, %r10
+ movq %r10, 24(%rdi)
+ retq
+
+ .globl _mcl_fp_subNF4L
+ .p2align 4, 0x90
+_mcl_fp_subNF4L: ## @mcl_fp_subNF4L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r8
+ movdqu (%rsi), %xmm2
+ movdqu 16(%rsi), %xmm3
+ pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1]
+ movd %xmm4, %r15
+ movd %xmm1, %r9
+ movd %xmm3, %r11
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %r10
+ pshufd $78, %xmm2, %xmm1 ## xmm1 = xmm2[2,3,0,1]
+ movd %xmm1, %r14
+ movd %xmm0, %rdx
+ movd %xmm2, %r12
+ subq %rdx, %r12
+ sbbq %r10, %r14
+ sbbq %r9, %r11
+ sbbq %r8, %r15
+ movq %r15, %rdx
+ sarq $63, %rdx
+ movq 24(%rcx), %rsi
+ andq %rdx, %rsi
+ movq 16(%rcx), %rax
+ andq %rdx, %rax
+ movq 8(%rcx), %rbx
+ andq %rdx, %rbx
+ andq (%rcx), %rdx
+ addq %r12, %rdx
+ movq %rdx, (%rdi)
+ adcq %r14, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r11, %rax
+ movq %rax, 16(%rdi)
+ adcq %r15, %rsi
+ movq %rsi, 24(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fpDbl_add4L
+ .p2align 4, 0x90
+_mcl_fpDbl_add4L: ## @mcl_fpDbl_add4L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r9
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r10
+ movq 48(%rsi), %r12
+ movq 40(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq 24(%rdx), %r15
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %rsi
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r15, %rbp
+ movq %rbp, 24(%rdi)
+ adcq %r14, %rsi
+ adcq %r11, %r13
+ adcq %r10, %r12
+ adcq %r9, %r8
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rsi, %rdx
+ subq (%rcx), %rdx
+ movq %r13, %rbp
+ sbbq 8(%rcx), %rbp
+ movq %r12, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r8, %r9
+ sbbq 24(%rcx), %r9
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rsi, %rdx
+ movq %rdx, 32(%rdi)
+ testb %al, %al
+ cmovneq %r13, %rbp
+ movq %rbp, 40(%rdi)
+ cmovneq %r12, %rbx
+ movq %rbx, 48(%rdi)
+ cmovneq %r8, %r9
+ movq %r9, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sub4L
+ .p2align 4, 0x90
+_mcl_fpDbl_sub4L: ## @mcl_fpDbl_sub4L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r9
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq (%rsi), %rbx
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ movq %rbx, (%rdi)
+ movq 8(%rsi), %rbx
+ sbbq 8(%rdx), %rbx
+ movq %rbx, 8(%rdi)
+ movq 16(%rsi), %rbx
+ sbbq 16(%rdx), %rbx
+ movq %rbx, 16(%rdi)
+ movq 24(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 40(%rdx), %r11
+ movq 32(%rdx), %rdx
+ movq %rbx, 24(%rdi)
+ movq 32(%rsi), %r12
+ sbbq %rdx, %r12
+ movq 48(%rsi), %r14
+ movq 40(%rsi), %r15
+ sbbq %r11, %r15
+ sbbq %r10, %r14
+ sbbq %r9, %r8
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ movq 16(%rcx), %rdx
+ cmoveq %rax, %rdx
+ movq 24(%rcx), %rbx
+ cmoveq %rax, %rbx
+ cmovneq 8(%rcx), %rax
+ addq %r12, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r15, %rax
+ movq %rax, 40(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 48(%rdi)
+ adcq %r8, %rbx
+ movq %rbx, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_mulUnitPre5L
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre5L: ## @mcl_fp_mulUnitPre5L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r15
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r12, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r14, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r11, %r15
+ movq %r15, 24(%rdi)
+ adcq %r9, %r10
+ movq %r10, 32(%rdi)
+ adcq $0, %r8
+ movq %r8, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fpDbl_mulPre5L
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre5L: ## @mcl_fpDbl_mulPre5L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rsi, %r9
+ movq %rdi, -48(%rsp) ## 8-byte Spill
+ movq (%r9), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq (%rdx), %rbp
+ mulq %rbp
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq 16(%r9), %r13
+ movq 24(%r9), %r15
+ movq 32(%r9), %rbx
+ movq %rax, (%rdi)
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rdx, %r11
+ movq %rax, %r10
+ movq %r15, %rax
+ mulq %rbp
+ movq %rdx, %r14
+ movq %rax, %rdi
+ movq %r13, %rax
+ mulq %rbp
+ movq %rax, %rsi
+ movq %rdx, %rcx
+ movq 8(%r9), %r8
+ movq %r8, %rax
+ mulq %rbp
+ movq %rdx, %rbp
+ movq %rax, %r12
+ addq -88(%rsp), %r12 ## 8-byte Folded Reload
+ adcq %rsi, %rbp
+ adcq %rdi, %rcx
+ adcq %r10, %r14
+ adcq $0, %r11
+ movq -72(%rsp), %r10 ## 8-byte Reload
+ movq 8(%r10), %rdi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %rsi
+ movq %r15, %rax
+ mulq %rdi
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq %r13, %rax
+ mulq %rdi
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %r8, %rax
+ mulq %rdi
+ movq %rdx, %r8
+ movq %rax, %rbx
+ movq -80(%rsp), %rax ## 8-byte Reload
+ mulq %rdi
+ addq %r12, %rax
+ movq -48(%rsp), %rdi ## 8-byte Reload
+ movq %rax, 8(%rdi)
+ adcq %rbp, %rbx
+ adcq %rcx, %r13
+ adcq %r14, %r15
+ adcq %r11, %rsi
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %rdx, %rbx
+ adcq %r8, %r13
+ adcq -56(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -88(%rsp), %rcx ## 8-byte Folded Reload
+ movq 32(%r9), %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq 16(%r10), %r12
+ mulq %r12
+ movq %rax, %r11
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq 24(%r9), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulq %r12
+ movq %rax, %r10
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq 16(%r9), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulq %r12
+ movq %rax, %r8
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq 8(%r9), %rdi
+ movq %rdi, %rax
+ mulq %r12
+ movq %rax, %rbp
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq (%r9), %r14
+ movq %r14, %rax
+ mulq %r12
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ addq %rbx, %rax
+ movq -48(%rsp), %rbx ## 8-byte Reload
+ movq %rax, 16(%rbx)
+ adcq %r13, %rbp
+ adcq %r15, %r8
+ adcq %rsi, %r10
+ adcq %rcx, %r11
+ sbbq %rsi, %rsi
+ movq -72(%rsp), %r12 ## 8-byte Reload
+ movq 24(%r12), %rcx
+ movq -96(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq %r14, %rax
+ mulq %rcx
+ movq %rdx, %r13
+ movq %rax, %rdi
+ movq -64(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq -32(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ andl $1, %esi
+ addq -40(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -16(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -56(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -80(%rsp), %rsi ## 8-byte Folded Reload
+ addq %rdi, %rbp
+ movq %rbp, 24(%rbx)
+ adcq %r15, %r8
+ adcq %rax, %r10
+ adcq %r14, %r11
+ adcq -24(%rsp), %rsi ## 8-byte Folded Reload
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r13, %r8
+ adcq -8(%rsp), %r10 ## 8-byte Folded Reload
+ adcq %rdx, %r11
+ adcq -64(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -96(%rsp), %rcx ## 8-byte Folded Reload
+ movq 32(%r12), %rdi
+ movq %rdi, %rax
+ mulq 32(%r9)
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq 24(%r9)
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq 16(%r9)
+ movq %rdx, %r14
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq 8(%r9)
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq (%r9)
+ addq %r8, %rax
+ movq -48(%rsp), %rdi ## 8-byte Reload
+ movq %rax, 32(%rdi)
+ adcq %r10, %rbp
+ adcq %r11, %rbx
+ adcq %rsi, %r13
+ adcq %rcx, %r15
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rbp
+ movq %rbp, 40(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r14, %r13
+ movq %r13, 56(%rdi)
+ adcq -80(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, 64(%rdi)
+ adcq -72(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sqrPre5L
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre5L: ## @mcl_fpDbl_sqrPre5L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 32(%rsi), %r11
+ movq (%rsi), %rbp
+ movq 8(%rsi), %r13
+ movq %r11, %rax
+ mulq %r13
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq 24(%rsi), %rbx
+ movq %rbx, %rax
+ mulq %r13
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %rcx
+ movq %rcx, %rax
+ mulq %r13
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %r11, %rax
+ mulq %rbp
+ movq %rdx, %r8
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rbp
+ movq %rdx, %r9
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq %rbp
+ movq %rdx, %r10
+ movq %rax, %r12
+ movq %r13, %rax
+ mulq %r13
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %r13, %rax
+ mulq %rbp
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq %rbp
+ movq %rdi, -24(%rsp) ## 8-byte Spill
+ movq %rax, (%rdi)
+ addq %rbx, %rdx
+ adcq %r13, %r12
+ adcq %r15, %r10
+ adcq -16(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r8
+ addq %rbx, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r14, %r12
+ adcq -32(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -64(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -56(%rsp), %r8 ## 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq %r13, %r12
+ adcq -8(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -48(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -72(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %r11, %rax
+ mulq %rcx
+ movq %rax, %r11
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq 24(%rsi), %rbx
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rax, %r14
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %r15
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq (%rsi), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rax, %r13
+ addq %r12, %rdi
+ movq -24(%rsp), %r12 ## 8-byte Reload
+ movq %rdi, 16(%r12)
+ adcq %r10, %r15
+ adcq %r9, %r13
+ adcq %r8, %r14
+ adcq %rbp, %r11
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ addq -32(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -64(%rsp), %r13 ## 8-byte Folded Reload
+ adcq %rdx, %r14
+ adcq -72(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -40(%rsp), %rdi ## 8-byte Folded Reload
+ movq -56(%rsp), %rax ## 8-byte Reload
+ mulq %rbx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq -48(%rsp), %rax ## 8-byte Reload
+ mulq %rbx
+ movq %rax, %rbp
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %rcx
+ movq %rcx, %rax
+ mulq %rbx
+ movq %rax, %r9
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ mulq %rbx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rax, %r10
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rax, %rbx
+ addq %r15, %rbp
+ movq %rbp, 24(%r12)
+ adcq %r13, %r8
+ adcq %r14, %r10
+ adcq %r11, %rbx
+ adcq %rdi, %r9
+ sbbq %r12, %r12
+ andl $1, %r12d
+ addq -56(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -72(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -64(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %rdx, %r9
+ adcq -48(%rsp), %r12 ## 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r14
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %r15
+ movq %rax, %r11
+ movq -40(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ addq %r8, %rsi
+ movq -24(%rsp), %r8 ## 8-byte Reload
+ movq %rsi, 32(%r8)
+ adcq %r10, %rdi
+ adcq %rbx, %rax
+ adcq %r9, %rbp
+ adcq %r12, %r11
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r13, %rdi
+ movq %r8, %rsi
+ movq %rdi, 40(%rsi)
+ adcq %r14, %rax
+ movq %rax, 48(%rsi)
+ adcq %rdx, %rbp
+ movq %rbp, 56(%rsi)
+ adcq -72(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 64(%rsi)
+ adcq %r15, %rcx
+ movq %rcx, 72(%rsi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mont5L
+ .p2align 4, 0x90
+_mcl_fp_mont5L: ## @mcl_fp_mont5L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ pushq %rax
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rdi, (%rsp) ## 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdi
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, %r15
+ movq 24(%rsi), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r10
+ movq %rdx, %rbx
+ movq 16(%rsi), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r11
+ movq %rdx, %r14
+ movq (%rsi), %rbp
+ movq %rbp, -24(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, %r12
+ movq %rax, %rsi
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rdx, %r9
+ addq %rsi, %r9
+ adcq %r11, %r12
+ adcq %r10, %r14
+ adcq %r8, %rbx
+ movq %rbx, -120(%rsp) ## 8-byte Spill
+ adcq $0, %r15
+ movq %r15, -112(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ imulq %rdx, %rbp
+ movq 32(%rcx), %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rdx, %r8
+ movq 24(%rcx), %rdx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r13
+ movq %rdx, %rsi
+ movq 16(%rcx), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r11
+ movq %rdx, %rbx
+ movq (%rcx), %rdi
+ movq %rdi, -16(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -64(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, %r10
+ movq %rax, %r15
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rdx, %rcx
+ addq %r15, %rcx
+ adcq %r11, %r10
+ adcq %r13, %rbx
+ adcq -8(%rsp), %rsi ## 8-byte Folded Reload
+ adcq $0, %r8
+ addq -128(%rsp), %rax ## 8-byte Folded Reload
+ adcq %r9, %rcx
+ adcq %r12, %r10
+ adcq %r14, %rbx
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -112(%rsp), %r8 ## 8-byte Folded Reload
+ sbbq %r15, %r15
+ andl $1, %r15d
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdi
+ movq %rdi, %rax
+ mulq -104(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %r12
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %rdi
+ movq %rdx, %r11
+ addq %r12, %r11
+ adcq -128(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rcx, %rdi
+ adcq %r10, %r11
+ adcq %rbx, %r9
+ adcq %rsi, %rbp
+ adcq %r8, %r14
+ adcq %r15, %r13
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rdi, %rbx
+ imulq -72(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ addq %r12, %rbx
+ adcq %r15, %rcx
+ adcq -128(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -120(%rsp), %r8 ## 8-byte Folded Reload
+ adcq $0, %r10
+ addq %rdi, %rax
+ adcq %r11, %rbx
+ adcq %r9, %rcx
+ adcq %rbp, %rsi
+ adcq %r14, %r8
+ adcq %r13, %r10
+ adcq $0, -112(%rsp) ## 8-byte Folded Spill
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rbp
+ movq %rbp, %rax
+ mulq -104(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r15
+ movq %rdx, %rbp
+ addq %r12, %rbp
+ adcq %r14, %rdi
+ adcq -128(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r13
+ addq %rbx, %r15
+ adcq %rcx, %rbp
+ adcq %rsi, %rdi
+ adcq %r8, %r11
+ adcq %r10, %r9
+ adcq -112(%rsp), %r13 ## 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %r15, %rsi
+ imulq -72(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %rsi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ addq %r8, %r12
+ adcq -8(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -120(%rsp), %r14 ## 8-byte Folded Reload
+ adcq $0, %r10
+ addq %r15, %rax
+ adcq %rbp, %r12
+ adcq %rdi, %rbx
+ adcq %r11, %rcx
+ adcq %r9, %r14
+ adcq %r13, %r10
+ adcq $0, -112(%rsp) ## 8-byte Folded Spill
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rsi
+ movq %rsi, %rax
+ mulq -104(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r15
+ movq %rsi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r13
+ movq %rsi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r11
+ movq %rdx, %rsi
+ addq %r13, %rsi
+ adcq %r15, %rdi
+ adcq -128(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -120(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r8
+ addq %r12, %r11
+ adcq %rbx, %rsi
+ adcq %rcx, %rdi
+ adcq %r14, %rbp
+ adcq %r10, %r9
+ adcq -112(%rsp), %r8 ## 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %r11, %rbx
+ imulq -72(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ addq %r12, %rbx
+ adcq %r14, %rcx
+ adcq -128(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %r13
+ addq %r11, %rax
+ adcq %rsi, %rbx
+ adcq %rdi, %rcx
+ adcq %rbp, %r15
+ adcq %r9, %r10
+ adcq %r8, %r13
+ movq -112(%rsp), %r8 ## 8-byte Reload
+ adcq $0, %r8
+ movq -96(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rsi
+ movq %rsi, %rax
+ mulq -104(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r14
+ movq %rdx, %rbp
+ addq %rdi, %rbp
+ adcq -88(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -80(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r9 ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rbx, %r14
+ adcq %rcx, %rbp
+ adcq %r15, %r12
+ adcq %r10, %r11
+ adcq %r13, %r9
+ adcq %r8, %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ sbbq %rcx, %rcx
+ movq -72(%rsp), %rdi ## 8-byte Reload
+ imulq %r14, %rdi
+ movq %rdi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r13
+ movq %rdi, %rax
+ movq %rdi, %r15
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r10
+ movq %r15, %rax
+ movq -16(%rsp), %r15 ## 8-byte Reload
+ mulq %r15
+ addq %r10, %rdx
+ adcq %r13, %rdi
+ adcq -104(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -72(%rsp), %rbx ## 8-byte Folded Reload
+ adcq $0, %r8
+ andl $1, %ecx
+ addq %r14, %rax
+ adcq %rbp, %rdx
+ adcq %r12, %rdi
+ adcq %r11, %rsi
+ adcq %r9, %rbx
+ adcq -96(%rsp), %r8 ## 8-byte Folded Reload
+ adcq $0, %rcx
+ movq %rdx, %rax
+ subq %r15, %rax
+ movq %rdi, %rbp
+ sbbq -64(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rsi, %r9
+ sbbq -56(%rsp), %r9 ## 8-byte Folded Reload
+ movq %rbx, %r10
+ sbbq -48(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r8, %r11
+ sbbq -40(%rsp), %r11 ## 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rbx, %r10
+ testb %cl, %cl
+ cmovneq %rdx, %rax
+ movq (%rsp), %rcx ## 8-byte Reload
+ movq %rax, (%rcx)
+ cmovneq %rdi, %rbp
+ movq %rbp, 8(%rcx)
+ cmovneq %rsi, %r9
+ movq %r9, 16(%rcx)
+ movq %r10, 24(%rcx)
+ cmovneq %r8, %r11
+ movq %r11, 32(%rcx)
+ addq $8, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF5L
+ .p2align 4, 0x90
+_mcl_fp_montNF5L: ## @mcl_fp_montNF5L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq (%rdx), %rbp
+ mulq %rbp
+ movq %rax, %r8
+ movq %rdx, %r13
+ movq 24(%rsi), %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ mulq %rbp
+ movq %rax, %r10
+ movq %rdx, %r11
+ movq 16(%rsi), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulq %rbp
+ movq %rax, %r15
+ movq %rdx, %r9
+ movq (%rsi), %rdi
+ movq %rdi, -48(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulq %rbp
+ movq %rdx, %r12
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq %rbp
+ movq %rax, %r14
+ movq %rdx, %rbp
+ addq %rbx, %rbp
+ adcq %r15, %r12
+ adcq %r10, %r9
+ adcq %r8, %r11
+ adcq $0, %r13
+ movq -8(%rcx), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %r14, %rsi
+ imulq %rax, %rsi
+ movq 32(%rcx), %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rax, %r10
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq 24(%rcx), %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rax, %rbx
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rdx
+ movq %rax, %r8
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq (%rcx), %rdi
+ movq %rdi, -40(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -24(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rcx
+ movq %rdx, %r15
+ movq %rax, %rcx
+ movq %rsi, %rax
+ mulq %rdi
+ addq %r14, %rax
+ adcq %rbp, %rcx
+ adcq %r12, %r8
+ adcq %r9, %rbx
+ adcq %r11, %r10
+ adcq $0, %r13
+ addq %rdx, %rcx
+ adcq %r15, %r8
+ adcq -16(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -128(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r13 ## 8-byte Folded Reload
+ movq -104(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rsi
+ movq %rsi, %rax
+ mulq -112(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -96(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r14
+ movq %rsi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rax, %rsi
+ movq %rdx, %r15
+ addq %r14, %r15
+ adcq %rdi, %r11
+ adcq -128(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rbp ## 8-byte Folded Reload
+ adcq $0, %r12
+ addq %rcx, %rsi
+ adcq %r8, %r15
+ adcq %rbx, %r11
+ adcq %r10, %r9
+ adcq %r13, %rbp
+ adcq $0, %r12
+ movq %rsi, %rdi
+ imulq -88(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq %rdi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ addq %rsi, %rax
+ adcq %r15, %r10
+ adcq %r11, %r14
+ adcq %r9, %r8
+ adcq %rbp, %r13
+ adcq $0, %r12
+ addq %rdx, %r10
+ adcq %rbx, %r14
+ adcq %rcx, %r8
+ adcq -128(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r12 ## 8-byte Folded Reload
+ movq -104(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rsi
+ movq %rsi, %rax
+ mulq -112(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -96(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r11
+ movq %rdx, %rsi
+ addq %rbp, %rsi
+ adcq %rbx, %rcx
+ adcq -128(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -120(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r15
+ addq %r10, %r11
+ adcq %r14, %rsi
+ adcq %r8, %rcx
+ adcq %r13, %rdi
+ adcq %r12, %r9
+ adcq $0, %r15
+ movq %r11, %rbx
+ imulq -88(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %r10
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ addq %r11, %rax
+ adcq %rsi, %rbp
+ adcq %rcx, %r10
+ adcq %rdi, %r8
+ adcq %r9, %r13
+ adcq $0, %r15
+ addq %rdx, %rbp
+ adcq %r12, %r10
+ adcq %r14, %r8
+ adcq -128(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r15 ## 8-byte Folded Reload
+ movq -104(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rsi
+ movq %rsi, %rax
+ mulq -112(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -96(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r14
+ movq %rdx, %rsi
+ addq %r12, %rsi
+ adcq %rbx, %rcx
+ adcq -128(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -120(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r11
+ addq %rbp, %r14
+ adcq %r10, %rsi
+ adcq %r8, %rcx
+ adcq %r13, %rdi
+ adcq %r15, %r9
+ adcq $0, %r11
+ movq %r14, %rbx
+ imulq -88(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r10
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ addq %r14, %rax
+ adcq %rsi, %rbp
+ adcq %rcx, %r10
+ adcq %rdi, %r8
+ adcq %r9, %r13
+ adcq $0, %r11
+ addq %rdx, %rbp
+ adcq %r12, %r10
+ adcq %r15, %r8
+ adcq -128(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r11 ## 8-byte Folded Reload
+ movq -104(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rcx
+ movq %rcx, %rax
+ mulq -112(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -96(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r12
+ movq %rdx, %rdi
+ addq %rsi, %rdi
+ adcq -96(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %rbx
+ addq %rbp, %r12
+ adcq %r10, %rdi
+ adcq %r8, %r15
+ adcq %r13, %r14
+ adcq %r11, %r9
+ adcq $0, %rbx
+ movq -88(%rsp), %r8 ## 8-byte Reload
+ imulq %r12, %r8
+ movq %r8, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %rcx
+ movq %r8, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ movq %r8, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, %rsi
+ movq %r8, %rax
+ movq %r8, %r13
+ movq -40(%rsp), %r10 ## 8-byte Reload
+ mulq %r10
+ movq %rdx, %r11
+ movq %rax, %r8
+ movq %r13, %rax
+ movq -24(%rsp), %r13 ## 8-byte Reload
+ mulq %r13
+ addq %r12, %r8
+ adcq %rdi, %rax
+ adcq %r15, %rsi
+ adcq %r14, %rbp
+ adcq %r9, %rcx
+ adcq $0, %rbx
+ addq %r11, %rax
+ adcq %rdx, %rsi
+ adcq -112(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -104(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -88(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rax, %r11
+ subq %r10, %r11
+ movq %rsi, %r10
+ sbbq %r13, %r10
+ movq %rbp, %r8
+ sbbq -80(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rcx, %r9
+ sbbq -72(%rsp), %r9 ## 8-byte Folded Reload
+ movq %rbx, %rdx
+ sbbq -64(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ sarq $63, %rdi
+ cmovsq %rax, %r11
+ movq -8(%rsp), %rax ## 8-byte Reload
+ movq %r11, (%rax)
+ cmovsq %rsi, %r10
+ movq %r10, 8(%rax)
+ cmovsq %rbp, %r8
+ movq %r8, 16(%rax)
+ cmovsq %rcx, %r9
+ movq %r9, 24(%rax)
+ cmovsq %rbx, %rdx
+ movq %rdx, 32(%rax)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed5L
+ .p2align 4, 0x90
+_mcl_fp_montRed5L: ## @mcl_fp_montRed5L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq (%rsi), %r9
+ movq %r9, %rdi
+ imulq %rax, %rdi
+ movq 32(%rcx), %rdx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rax, %r8
+ movq %rdx, %r13
+ movq 24(%rcx), %rdx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rax, %r11
+ movq %rdx, %r10
+ movq 16(%rcx), %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rax, %r14
+ movq %rdx, %r15
+ movq (%rcx), %rbp
+ movq %rbp, -40(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -72(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rdx, %r12
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq %rbp
+ movq %rdx, %rcx
+ addq %rbx, %rcx
+ adcq %r14, %r12
+ adcq %r11, %r15
+ adcq %r8, %r10
+ adcq $0, %r13
+ addq %r9, %rax
+ movq 72(%rsi), %rax
+ movq 64(%rsi), %rdx
+ adcq 8(%rsi), %rcx
+ adcq 16(%rsi), %r12
+ adcq 24(%rsi), %r15
+ adcq 32(%rsi), %r10
+ adcq 40(%rsi), %r13
+ movq %r13, -112(%rsp) ## 8-byte Spill
+ movq 56(%rsi), %rdi
+ movq 48(%rsi), %rsi
+ adcq $0, %rsi
+ movq %rsi, -24(%rsp) ## 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -64(%rsp) ## 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ sbbq %r8, %r8
+ andl $1, %r8d
+ movq %rcx, %rsi
+ movq -104(%rsp), %r9 ## 8-byte Reload
+ imulq %r9, %rsi
+ movq %rsi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ addq %rbp, %rsi
+ adcq %rdi, %rbx
+ adcq -16(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -32(%rsp), %r14 ## 8-byte Folded Reload
+ adcq $0, %r11
+ addq %rcx, %rax
+ adcq %r12, %rsi
+ adcq %r15, %rbx
+ adcq %r10, %r13
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -24(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, -96(%rsp) ## 8-byte Folded Spill
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r8
+ movq %rsi, %rcx
+ imulq %r9, %rcx
+ movq %rcx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ movq -56(%rsp), %r9 ## 8-byte Reload
+ mulq %r9
+ movq %rdx, %r15
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ addq %rdi, %rcx
+ adcq -32(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -24(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %rbp
+ addq %rsi, %rax
+ adcq %rbx, %rcx
+ adcq %r13, %r12
+ adcq %r14, %r15
+ adcq %r11, %r10
+ adcq -64(%rsp), %rbp ## 8-byte Folded Reload
+ adcq $0, -96(%rsp) ## 8-byte Folded Spill
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r8
+ movq %rcx, %rsi
+ imulq -104(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %r9
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ addq %rdi, %rsi
+ adcq %rbx, %r9
+ adcq -112(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -64(%rsp), %r14 ## 8-byte Folded Reload
+ adcq $0, %r11
+ addq %rcx, %rax
+ adcq %r12, %rsi
+ adcq %r15, %r9
+ adcq %r10, %r13
+ adcq %rbp, %r14
+ adcq -96(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r8
+ movq -104(%rsp), %rdi ## 8-byte Reload
+ imulq %rsi, %rdi
+ movq %rdi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r15
+ movq %rdi, %rax
+ movq %rdi, %r10
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r12
+ movq %r10, %rax
+ movq -40(%rsp), %r10 ## 8-byte Reload
+ mulq %r10
+ addq %r12, %rdx
+ adcq %r15, %rdi
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -96(%rsp), %rcx ## 8-byte Folded Reload
+ adcq $0, %rbp
+ addq %rsi, %rax
+ adcq %r9, %rdx
+ adcq %r13, %rdi
+ adcq %r14, %rbx
+ adcq %r11, %rcx
+ adcq -48(%rsp), %rbp ## 8-byte Folded Reload
+ adcq $0, %r8
+ movq %rdx, %rax
+ subq %r10, %rax
+ movq %rdi, %rsi
+ sbbq -72(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rbx, %r9
+ sbbq -56(%rsp), %r9 ## 8-byte Folded Reload
+ movq %rcx, %r10
+ sbbq -88(%rsp), %r10 ## 8-byte Folded Reload
+ movq %rbp, %r11
+ sbbq -80(%rsp), %r11 ## 8-byte Folded Reload
+ sbbq $0, %r8
+ andl $1, %r8d
+ cmovneq %rbp, %r11
+ testb %r8b, %r8b
+ cmovneq %rdx, %rax
+ movq -8(%rsp), %rdx ## 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rdi, %rsi
+ movq %rsi, 8(%rdx)
+ cmovneq %rbx, %r9
+ movq %r9, 16(%rdx)
+ cmovneq %rcx, %r10
+ movq %r10, 24(%rdx)
+ movq %r11, 32(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre5L
+ .p2align 4, 0x90
+_mcl_fp_addPre5L: ## @mcl_fp_addPre5L
+## BB#0:
+ movq 32(%rdx), %r8
+ movq 24(%rdx), %r9
+ movq 24(%rsi), %r11
+ movq 32(%rsi), %r10
+ movq 16(%rdx), %rcx
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rcx
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ adcq %r9, %r11
+ movq %r11, 24(%rdi)
+ adcq %r8, %r10
+ movq %r10, 32(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_subPre5L
+ .p2align 4, 0x90
+_mcl_fp_subPre5L: ## @mcl_fp_subPre5L
+## BB#0:
+ pushq %rbx
+ movq 32(%rsi), %r10
+ movq 24(%rdx), %r8
+ movq 32(%rdx), %r9
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %rcx
+ movq %rbx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r8, %r11
+ movq %r11, 24(%rdi)
+ sbbq %r9, %r10
+ movq %r10, 32(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ retq
+
+ .globl _mcl_fp_shr1_5L
+ .p2align 4, 0x90
+_mcl_fp_shr1_5L: ## @mcl_fp_shr1_5L
+## BB#0:
+ movq 32(%rsi), %r8
+ movq 24(%rsi), %rcx
+ movq 16(%rsi), %rdx
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rax
+ movq %rax, (%rdi)
+ shrdq $1, %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 16(%rdi)
+ shrdq $1, %r8, %rcx
+ movq %rcx, 24(%rdi)
+ shrq %r8
+ movq %r8, 32(%rdi)
+ retq
+
+ .globl _mcl_fp_add5L
+ .p2align 4, 0x90
+_mcl_fp_add5L: ## @mcl_fp_add5L
+## BB#0:
+ pushq %rbx
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %rbx
+ movq 24(%rsi), %r9
+ movq 32(%rsi), %r8
+ movq 16(%rdx), %r10
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r10
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ adcq %rbx, %r9
+ movq %r9, 24(%rdi)
+ adcq %r11, %r8
+ movq %r8, 32(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r10
+ sbbq 24(%rcx), %r9
+ sbbq 32(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB74_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %r9, 24(%rdi)
+ movq %r8, 32(%rdi)
+LBB74_2: ## %carry
+ popq %rbx
+ retq
+
+ .globl _mcl_fp_addNF5L
+ .p2align 4, 0x90
+_mcl_fp_addNF5L: ## @mcl_fp_addNF5L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 32(%rdx), %r8
+ movq 24(%rdx), %r9
+ movq 16(%rdx), %r10
+ movq (%rdx), %r14
+ movq 8(%rdx), %r11
+ addq (%rsi), %r14
+ adcq 8(%rsi), %r11
+ adcq 16(%rsi), %r10
+ adcq 24(%rsi), %r9
+ adcq 32(%rsi), %r8
+ movq %r14, %rsi
+ subq (%rcx), %rsi
+ movq %r11, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r10, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r9, %r15
+ sbbq 24(%rcx), %r15
+ movq %r8, %rax
+ sbbq 32(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r14, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ cmovsq %r9, %r15
+ movq %r15, 24(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 32(%rdi)
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_sub5L
+ .p2align 4, 0x90
+_mcl_fp_sub5L: ## @mcl_fp_sub5L
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 32(%rsi), %r8
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq 24(%rsi), %r9
+ movq 16(%rsi), %r10
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %rsi
+ sbbq 16(%rdx), %r10
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r10, 16(%rdi)
+ sbbq %r11, %r9
+ movq %r9, 24(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 32(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB76_2
+## BB#1: ## %carry
+ movq 32(%rcx), %r11
+ movq 24(%rcx), %r14
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rbx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %rsi, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r9, %r14
+ movq %r14, 24(%rdi)
+ adcq %r8, %r11
+ movq %r11, 32(%rdi)
+LBB76_2: ## %nocarry
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fp_subNF5L
+ .p2align 4, 0x90
+_mcl_fp_subNF5L: ## @mcl_fp_subNF5L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 32(%rsi), %r13
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r10
+ movdqu (%rsi), %xmm2
+ movdqu 16(%rsi), %xmm3
+ pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1]
+ movd %xmm4, %r8
+ movd %xmm1, %r11
+ movd %xmm3, %r9
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %r14
+ pshufd $78, %xmm2, %xmm1 ## xmm1 = xmm2[2,3,0,1]
+ movd %xmm1, %r15
+ movd %xmm0, %rbx
+ movd %xmm2, %r12
+ subq %rbx, %r12
+ sbbq %r14, %r15
+ sbbq %r11, %r9
+ sbbq %r10, %r8
+ sbbq 32(%rdx), %r13
+ movq %r13, %rdx
+ sarq $63, %rdx
+ movq %rdx, %rbx
+ shldq $1, %r13, %rbx
+ movq 8(%rcx), %rsi
+ andq %rbx, %rsi
+ andq (%rcx), %rbx
+ movq 32(%rcx), %r10
+ andq %rdx, %r10
+ movq 24(%rcx), %rax
+ andq %rdx, %rax
+ rolq %rdx
+ andq 16(%rcx), %rdx
+ addq %r12, %rbx
+ movq %rbx, (%rdi)
+ adcq %r15, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r9, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r8, %rax
+ movq %rax, 24(%rdi)
+ adcq %r13, %r10
+ movq %r10, 32(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fpDbl_add5L
+ .p2align 4, 0x90
+_mcl_fpDbl_add5L: ## @mcl_fpDbl_add5L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 72(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 64(%rdx), %r11
+ movq 56(%rdx), %r14
+ movq 48(%rdx), %r15
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %r13
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %rbp
+ adcq 32(%rdx), %r13
+ movq 40(%rdx), %r9
+ movq %rbx, (%rdi)
+ movq 72(%rsi), %r8
+ movq %rax, 8(%rdi)
+ movq 64(%rsi), %r10
+ movq %r12, 16(%rdi)
+ movq 56(%rsi), %r12
+ movq %rbp, 24(%rdi)
+ movq 48(%rsi), %rbp
+ movq 40(%rsi), %rbx
+ movq %r13, 32(%rdi)
+ adcq %r9, %rbx
+ adcq %r15, %rbp
+ adcq %r14, %r12
+ adcq %r11, %r10
+ adcq -8(%rsp), %r8 ## 8-byte Folded Reload
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq %rbx, %rax
+ subq (%rcx), %rax
+ movq %rbp, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r12, %r9
+ sbbq 16(%rcx), %r9
+ movq %r10, %r11
+ sbbq 24(%rcx), %r11
+ movq %r8, %r14
+ sbbq 32(%rcx), %r14
+ sbbq $0, %rsi
+ andl $1, %esi
+ cmovneq %rbx, %rax
+ movq %rax, 40(%rdi)
+ testb %sil, %sil
+ cmovneq %rbp, %rdx
+ movq %rdx, 48(%rdi)
+ cmovneq %r12, %r9
+ movq %r9, 56(%rdi)
+ cmovneq %r10, %r11
+ movq %r11, 64(%rdi)
+ cmovneq %r8, %r14
+ movq %r14, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sub5L
+ .p2align 4, 0x90
+_mcl_fpDbl_sub5L: ## @mcl_fpDbl_sub5L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 72(%rdx), %r9
+ movq 64(%rdx), %r10
+ movq 56(%rdx), %r14
+ movq 16(%rsi), %r8
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %eax, %eax
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r8
+ movq 24(%rsi), %r12
+ sbbq 24(%rdx), %r12
+ movq %r15, (%rdi)
+ movq 32(%rsi), %rbx
+ sbbq 32(%rdx), %rbx
+ movq %r11, 8(%rdi)
+ movq 48(%rdx), %r15
+ movq 40(%rdx), %rdx
+ movq %r8, 16(%rdi)
+ movq 72(%rsi), %r8
+ movq %r12, 24(%rdi)
+ movq 64(%rsi), %r11
+ movq %rbx, 32(%rdi)
+ movq 40(%rsi), %rbp
+ sbbq %rdx, %rbp
+ movq 56(%rsi), %r12
+ movq 48(%rsi), %r13
+ sbbq %r15, %r13
+ sbbq %r14, %r12
+ sbbq %r10, %r11
+ sbbq %r9, %r8
+ movl $0, %edx
+ sbbq $0, %rdx
+ andl $1, %edx
+ movq (%rcx), %rsi
+ cmoveq %rax, %rsi
+ testb %dl, %dl
+ movq 16(%rcx), %rdx
+ cmoveq %rax, %rdx
+ movq 8(%rcx), %rbx
+ cmoveq %rax, %rbx
+ movq 32(%rcx), %r9
+ cmoveq %rax, %r9
+ cmovneq 24(%rcx), %rax
+ addq %rbp, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r13, %rbx
+ movq %rbx, 48(%rdi)
+ adcq %r12, %rdx
+ movq %rdx, 56(%rdi)
+ adcq %r11, %rax
+ movq %rax, 64(%rdi)
+ adcq %r8, %r9
+ movq %r9, 72(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mulUnitPre6L
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre6L: ## @mcl_fp_mulUnitPre6L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 40(%rsi)
+ movq %rdx, %r9
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r15
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r13
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %rbp, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 16(%rdi)
+ adcq %r14, %r13
+ movq %r13, 24(%rdi)
+ adcq %r11, %r15
+ movq %r15, 32(%rdi)
+ adcq %r8, %r10
+ movq %r10, 40(%rdi)
+ adcq $0, %r9
+ movq %r9, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_mulPre6L
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre6L: ## @mcl_fpDbl_mulPre6L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rsi, %r12
+ movq %rdi, -16(%rsp) ## 8-byte Spill
+ movq (%r12), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq (%rdx), %rsi
+ mulq %rsi
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq 24(%r12), %rbp
+ movq %rbp, -104(%rsp) ## 8-byte Spill
+ movq 32(%r12), %rbx
+ movq 40(%r12), %r11
+ movq %rax, (%rdi)
+ movq %r11, %rax
+ mulq %rsi
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rsi
+ movq %rdx, %rcx
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rsi
+ movq %rax, %r9
+ movq %rdx, %rdi
+ movq 16(%r12), %r8
+ movq %r8, %rax
+ mulq %rsi
+ movq %rax, %r14
+ movq %rdx, %rbp
+ movq 8(%r12), %r10
+ movq %r10, %rax
+ mulq %rsi
+ movq %rdx, %r15
+ movq %rax, %r13
+ addq -88(%rsp), %r13 ## 8-byte Folded Reload
+ adcq %r14, %r15
+ adcq %r9, %rbp
+ adcq -112(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -96(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ movq -120(%rsp), %rsi ## 8-byte Reload
+ adcq $0, %rsi
+ movq -64(%rsp), %r9 ## 8-byte Reload
+ movq 8(%r9), %rcx
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %r11
+ movq -104(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %r8, %rax
+ mulq %rcx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq %r10, %rax
+ mulq %rcx
+ movq %rdx, %r10
+ movq %rax, %rbx
+ movq -72(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ addq %r13, %rax
+ movq -16(%rsp), %r13 ## 8-byte Reload
+ movq %rax, 8(%r13)
+ adcq %r15, %rbx
+ adcq %rbp, %r8
+ adcq %rdi, %r14
+ adcq -112(%rsp), %r11 ## 8-byte Folded Reload
+ movq -120(%rsp), %rax ## 8-byte Reload
+ adcq %rsi, %rax
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq %rdx, %rbx
+ adcq %r10, %r8
+ adcq -80(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ adcq -88(%rsp), %rsi ## 8-byte Folded Reload
+ movq 40(%r12), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq 16(%r9), %rcx
+ mulq %rcx
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq 32(%r12), %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %r10
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq 24(%r12), %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %r9
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq 16(%r12), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %rbp
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq 8(%r12), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %rdi
+ movq %rdx, %r15
+ movq (%r12), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ mulq %rcx
+ addq %rbx, %rax
+ movq %rax, 16(%r13)
+ adcq %r8, %rdi
+ adcq %r14, %rbp
+ adcq %r11, %r9
+ adcq -120(%rsp), %r10 ## 8-byte Folded Reload
+ movq -72(%rsp), %rax ## 8-byte Reload
+ adcq %rsi, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %rdx, %rdi
+ adcq %r15, %rbp
+ adcq -56(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -48(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -40(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ adcq -112(%rsp), %rcx ## 8-byte Folded Reload
+ movq -64(%rsp), %rbx ## 8-byte Reload
+ movq 24(%rbx), %rsi
+ movq -88(%rsp), %rax ## 8-byte Reload
+ mulq %rsi
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq -96(%rsp), %rax ## 8-byte Reload
+ mulq %rsi
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq -104(%rsp), %rax ## 8-byte Reload
+ mulq %rsi
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq -80(%rsp), %rax ## 8-byte Reload
+ mulq %rsi
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq -32(%rsp), %rax ## 8-byte Reload
+ mulq %rsi
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq -24(%rsp), %rax ## 8-byte Reload
+ mulq %rsi
+ addq %rdi, %rax
+ movq -16(%rsp), %rsi ## 8-byte Reload
+ movq %rax, 24(%rsi)
+ adcq %rbp, %r11
+ adcq %r9, %r13
+ adcq %r10, %r15
+ adcq -72(%rsp), %r14 ## 8-byte Folded Reload
+ movq -120(%rsp), %rax ## 8-byte Reload
+ adcq %rcx, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %rdx, %r11
+ adcq %r8, %r13
+ adcq -112(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ adcq -88(%rsp), %rcx ## 8-byte Folded Reload
+ movq 40(%r12), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq 32(%rbx), %rdi
+ mulq %rdi
+ movq %rax, %r9
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq 32(%r12), %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r10
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq 24(%r12), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq 16(%r12), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %rbx
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq (%r12), %rbp
+ movq 8(%r12), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ addq %r11, %rax
+ movq %rax, 32(%rsi)
+ adcq %r13, %r12
+ adcq %r15, %rbx
+ adcq %r14, %r8
+ adcq -120(%rsp), %r10 ## 8-byte Folded Reload
+ adcq %rcx, %r9
+ movq -64(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rcx
+ sbbq %rsi, %rsi
+ movq -80(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq -56(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq -8(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %r11
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, %rbp
+ movq %rax, %rdi
+ movq -32(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, %r13
+ movq %rax, %r14
+ movq -40(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ andl $1, %esi
+ addq -48(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -104(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -72(%rsp), %rsi ## 8-byte Folded Reload
+ addq %rdi, %r12
+ movq -16(%rsp), %rcx ## 8-byte Reload
+ movq %r12, 40(%rcx)
+ adcq %r11, %rbx
+ adcq %rax, %r8
+ adcq %r14, %r10
+ adcq %r15, %r9
+ adcq -24(%rsp), %rsi ## 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rbp, %rbx
+ movq %rbx, 48(%rcx)
+ adcq -80(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 56(%rcx)
+ adcq %rdx, %r10
+ movq %r10, 64(%rcx)
+ adcq %r13, %r9
+ movq %r9, 72(%rcx)
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 80(%rcx)
+ adcq -64(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 88(%rcx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sqrPre6L
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre6L: ## @mcl_fpDbl_sqrPre6L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, -48(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r8
+ movq %r8, -120(%rsp) ## 8-byte Spill
+ movq 24(%rsi), %r11
+ movq %r11, -112(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %r12
+ movq 40(%rsi), %r9
+ movq (%rsi), %rcx
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, %rbp
+ movq %rax, (%rdi)
+ movq %r9, %rax
+ mulq %rcx
+ movq %rdx, %rbx
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %r12, %rax
+ mulq %rcx
+ movq %rdx, %r10
+ movq %rax, %r13
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %rdi
+ movq %rax, %r15
+ movq %r8, %rax
+ mulq %rcx
+ movq %rax, %r11
+ movq %rdx, %r14
+ movq 8(%rsi), %r8
+ movq %r8, %rax
+ mulq %rcx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rax, %rcx
+ addq %rcx, %rbp
+ adcq %rdx, %r11
+ adcq %r15, %r14
+ adcq %r13, %rdi
+ adcq -128(%rsp), %r10 ## 8-byte Folded Reload
+ adcq $0, %rbx
+ movq %rbx, -72(%rsp) ## 8-byte Spill
+ movq %r9, %rax
+ mulq %r8
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %r12, %rax
+ mulq %r8
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rax, %r9
+ movq -112(%rsp), %rax ## 8-byte Reload
+ mulq %r8
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq -120(%rsp), %rax ## 8-byte Reload
+ mulq %r8
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %r8, %rax
+ mulq %r8
+ movq %rax, %rbx
+ addq %rcx, %rbp
+ movq -48(%rsp), %rax ## 8-byte Reload
+ movq %rbp, 8(%rax)
+ adcq %r11, %rbx
+ adcq %r14, %r12
+ adcq %rdi, %r15
+ adcq %r10, %r9
+ movq %r13, %rax
+ adcq -72(%rsp), %rax ## 8-byte Folded Reload
+ sbbq %r13, %r13
+ andl $1, %r13d
+ addq -56(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %rdx, %r12
+ adcq -120(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -64(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %r13 ## 8-byte Folded Reload
+ movq 40(%rsi), %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %rdi
+ mulq %rdi
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r11
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq 24(%rsi), %rbp
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, %r8
+ movq %r8, -24(%rsp) ## 8-byte Spill
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r10
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq (%rsi), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq %rdi
+ movq %rax, %rcx
+ addq %rbx, %r14
+ movq -48(%rsp), %rax ## 8-byte Reload
+ movq %r14, 16(%rax)
+ adcq %r12, %r10
+ adcq %r15, %rcx
+ adcq %r8, %r9
+ adcq -88(%rsp), %r11 ## 8-byte Folded Reload
+ movq -96(%rsp), %r8 ## 8-byte Reload
+ adcq %r13, %r8
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ addq -104(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -32(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %rdx, %r9
+ adcq -128(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -80(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rdi ## 8-byte Folded Reload
+ movq -56(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq -64(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq -120(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq -72(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq %rbp
+ movq %rax, %r13
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ addq %r10, %rbx
+ movq -48(%rsp), %rax ## 8-byte Reload
+ movq %rbx, 24(%rax)
+ adcq %rcx, %r14
+ adcq -24(%rsp), %r9 ## 8-byte Folded Reload
+ adcq %r11, %r13
+ adcq %r8, %r15
+ adcq %rdi, %r12
+ sbbq %rcx, %rcx
+ movq 8(%rsi), %rbp
+ movq 40(%rsi), %rbx
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq (%rsi), %rdi
+ movq %rdi, %rax
+ mulq %rbx
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %r10
+ movq %rbp, %rax
+ mulq %r10
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %r10
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ andl $1, %ecx
+ addq -40(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -128(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -80(%rsp), %rcx ## 8-byte Folded Reload
+ movq 24(%rsi), %rdi
+ movq %rdi, %rax
+ mulq %rbx
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %r10
+ movq %rax, %rbp
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %rsi
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %r10
+ movq %rdx, %r11
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %r10
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rax, %rdi
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %rbx
+ movq %r10, %rax
+ mulq %r10
+ movq %rdx, %r8
+ addq -8(%rsp), %r14 ## 8-byte Folded Reload
+ movq -48(%rsp), %rdx ## 8-byte Reload
+ movq %r14, 32(%rdx)
+ adcq -32(%rsp), %r9 ## 8-byte Folded Reload
+ adcq %r13, %rsi
+ adcq %r15, %rbp
+ adcq %r12, %rax
+ adcq %rdi, %rcx
+ sbbq %r10, %r10
+ andl $1, %r10d
+ addq -24(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ adcq %r11, %rbp
+ adcq -40(%rsp), %rax ## 8-byte Folded Reload
+ adcq %r8, %rcx
+ movq -16(%rsp), %r8 ## 8-byte Reload
+ adcq %r8, %r10
+ addq -72(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 40(%rdx)
+ adcq -112(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -104(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ adcq %rdi, %rcx
+ adcq %rbx, %r10
+ sbbq %rdi, %rdi
+ andl $1, %edi
+ addq -64(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 48(%rdx)
+ adcq -56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 56(%rdx)
+ adcq -80(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 64(%rdx)
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 72(%rdx)
+ adcq %r8, %r10
+ movq %r10, 80(%rdx)
+ adcq -88(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 88(%rdx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mont6L
+ .p2align 4, 0x90
+_mcl_fp_mont6L: ## @mcl_fp_mont6L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $48, %rsp
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rdi, 40(%rsp) ## 8-byte Spill
+ movq 40(%rsi), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdi
+ mulq %rdi
+ movq %rax, %r10
+ movq %rdx, %r11
+ movq 32(%rsi), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r14
+ movq %rdx, %r15
+ movq 24(%rsi), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r8
+ movq %rdx, %rbx
+ movq 16(%rsi), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r9
+ movq %rdx, %r12
+ movq (%rsi), %rbp
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdx, %rdi
+ addq %rsi, %rdi
+ adcq %r9, %r13
+ adcq %r8, %r12
+ adcq %r14, %rbx
+ movq %rbx, -88(%rsp) ## 8-byte Spill
+ adcq %r10, %r15
+ movq %r15, -120(%rsp) ## 8-byte Spill
+ adcq $0, %r11
+ movq %r11, -112(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq %rax, %rbx
+ imulq %rdx, %rbx
+ movq 40(%rcx), %rdx
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq 32(%rcx), %rdx
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r9
+ movq %rdx, %r14
+ movq 24(%rcx), %rdx
+ movq %rdx, (%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r8
+ movq %rdx, %r15
+ movq 16(%rcx), %rdx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r10
+ movq %rdx, %r11
+ movq (%rcx), %rsi
+ movq %rsi, -24(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -16(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rdx, %rbp
+ movq %rax, %rcx
+ movq %rbx, %rax
+ mulq %rsi
+ movq %rdx, %rbx
+ addq %rcx, %rbx
+ adcq %r10, %rbp
+ adcq %r8, %r11
+ adcq %r9, %r15
+ adcq -104(%rsp), %r14 ## 8-byte Folded Reload
+ movq -128(%rsp), %rcx ## 8-byte Reload
+ adcq $0, %rcx
+ addq -96(%rsp), %rax ## 8-byte Folded Reload
+ adcq %rdi, %rbx
+ adcq %r13, %rbp
+ adcq %r12, %r11
+ adcq -88(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdi
+ movq %rdi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r12
+ movq %rdx, %rdi
+ addq %r10, %rdi
+ adcq %r9, %rcx
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r8 ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq -88(%rsp), %rdx ## 8-byte Folded Reload
+ movq -112(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rbx, %r12
+ adcq %rbp, %rdi
+ adcq %r11, %rcx
+ adcq %r15, %r13
+ adcq %r14, %r8
+ adcq -128(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ adcq %rsi, %rax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %r12, %rbx
+ imulq -32(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %rbx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ addq %r11, %r9
+ adcq %r10, %rbp
+ adcq -48(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -104(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r14 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r12, %rax
+ adcq %rdi, %r9
+ adcq %rcx, %rbp
+ adcq %r13, %rsi
+ adcq %r8, %r15
+ adcq -120(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq $0, -88(%rsp) ## 8-byte Folded Spill
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rcx
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r13
+ movq %rdx, %rcx
+ addq %r10, %rcx
+ adcq %r8, %rbx
+ adcq %rdi, %r12
+ adcq -104(%rsp), %r11 ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq -96(%rsp), %rdx ## 8-byte Folded Reload
+ movq -112(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %r9, %r13
+ adcq %rbp, %rcx
+ adcq %rsi, %rbx
+ adcq %r15, %r12
+ adcq %r14, %r11
+ adcq -128(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ adcq -88(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %r13, %rdi
+ imulq -32(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ addq %r10, %r8
+ adcq %r15, %rbp
+ adcq -48(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -104(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r14 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r13, %rax
+ adcq %rcx, %r8
+ adcq %rbx, %rbp
+ adcq %r12, %rsi
+ adcq %r11, %r9
+ adcq -120(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq $0, -88(%rsp) ## 8-byte Folded Spill
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rcx
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r13
+ movq %rdx, %rcx
+ addq %r12, %rcx
+ adcq %r10, %rbx
+ adcq %rdi, %r15
+ adcq -104(%rsp), %r11 ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq -96(%rsp), %rdx ## 8-byte Folded Reload
+ movq -112(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %r8, %r13
+ adcq %rbp, %rcx
+ adcq %rsi, %rbx
+ adcq %r9, %r15
+ adcq %r14, %r11
+ adcq -128(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ adcq -88(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %r13, %rsi
+ imulq -32(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %r8
+ movq %rsi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ addq %r9, %rsi
+ adcq %r8, %r12
+ adcq %r10, %r14
+ adcq -104(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -96(%rsp), %rbp ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r13, %rax
+ adcq %rcx, %rsi
+ adcq %rbx, %r12
+ adcq %r15, %r14
+ adcq %r11, %rdi
+ adcq -120(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -112(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq $0, -88(%rsp) ## 8-byte Folded Spill
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rcx
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %r13
+ addq %r9, %r13
+ adcq %r11, %r15
+ adcq -48(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ movq -120(%rsp), %rcx ## 8-byte Reload
+ adcq -96(%rsp), %rcx ## 8-byte Folded Reload
+ movq -112(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rsi, %r8
+ adcq %r12, %r13
+ adcq %r14, %r15
+ adcq %rdi, %r10
+ adcq %rbp, %rbx
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ adcq -88(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %r8, %rcx
+ imulq -32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ addq %r12, %r14
+ adcq %rdi, %rbp
+ adcq -48(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -104(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r9 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r8, %rax
+ adcq %r13, %r14
+ adcq %r15, %rbp
+ adcq %r10, %rsi
+ adcq %rbx, %r11
+ adcq -120(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, -120(%rsp) ## 8-byte Spill
+ adcq -112(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq -88(%rsp), %rdi ## 8-byte Reload
+ adcq $0, %rdi
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rcx
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rcx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r15
+ movq %rdx, %r8
+ addq %r9, %r8
+ adcq %rbx, %r10
+ adcq -80(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -72(%rsp), %r12 ## 8-byte Folded Reload
+ movq -64(%rsp), %rax ## 8-byte Reload
+ adcq -112(%rsp), %rax ## 8-byte Folded Reload
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r14, %r15
+ adcq %rbp, %r8
+ adcq %rsi, %r10
+ adcq %r11, %r13
+ adcq -120(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, -72(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ adcq %rdi, %rdx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ sbbq %rcx, %rcx
+ movq -32(%rsp), %rdi ## 8-byte Reload
+ imulq %r15, %rdi
+ movq %rdi, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ andl $1, %ecx
+ addq %r14, %rax
+ adcq %r11, %rdx
+ adcq -40(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -80(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -32(%rsp), %r12 ## 8-byte Folded Reload
+ adcq $0, %rbp
+ addq %r15, %r9
+ adcq %r8, %rax
+ adcq %r10, %rdx
+ adcq %r13, %rbx
+ adcq -72(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -64(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -56(%rsp), %rbp ## 8-byte Folded Reload
+ adcq $0, %rcx
+ movq %rax, %r8
+ subq -24(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rdx, %r9
+ sbbq -16(%rsp), %r9 ## 8-byte Folded Reload
+ movq %rbx, %r10
+ sbbq -8(%rsp), %r10 ## 8-byte Folded Reload
+ movq %rsi, %r11
+ sbbq (%rsp), %r11 ## 8-byte Folded Reload
+ movq %r12, %r14
+ sbbq 8(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rbp, %r15
+ sbbq 16(%rsp), %r15 ## 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rsi, %r11
+ testb %cl, %cl
+ cmovneq %rax, %r8
+ movq 40(%rsp), %rax ## 8-byte Reload
+ movq %r8, (%rax)
+ cmovneq %rdx, %r9
+ movq %r9, 8(%rax)
+ cmovneq %rbx, %r10
+ movq %r10, 16(%rax)
+ movq %r11, 24(%rax)
+ cmovneq %r12, %r14
+ movq %r14, 32(%rax)
+ cmovneq %rbp, %r15
+ movq %r15, 40(%rax)
+ addq $48, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
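+## Note (assumed from mcl naming): mcl_fp_montNF6L appears to compute the 6-limb (384-bit)
+## Montgomery product z = x*y*R^-1 mod p, R = 2^384, in the "NF" form that corrects the result
+## with a sign-based cmov chain instead of a borrow-based final subtraction. Arguments follow
+## (%rdi)=z, (%rsi)=x, (%rdx)=y, (%rcx)=p; -8(%rcx) presumably holds -p^-1 mod 2^64.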
+ .globl _mcl_fp_montNF6L
+ .p2align 4, 0x90
+_mcl_fp_montNF6L: ## @mcl_fp_montNF6L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $40, %rsp
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rdi, 32(%rsp) ## 8-byte Spill
+ movq 40(%rsi), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdi
+ mulq %rdi
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rdx, %r12
+ movq 32(%rsi), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r14
+ movq %rdx, %r10
+ movq 24(%rsi), %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r15
+ movq %rdx, %r9
+ movq 16(%rsi), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r11
+ movq %rdx, %r8
+ movq (%rsi), %rbx
+ movq %rbx, 8(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, %rbp
+ movq %rax, %rsi
+ movq %rbx, %rax
+ mulq %rdi
+ movq %rax, %r13
+ movq %rdx, %rdi
+ addq %rsi, %rdi
+ adcq %r11, %rbp
+ adcq %r15, %r8
+ adcq %r14, %r9
+ adcq -64(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, -128(%rsp) ## 8-byte Spill
+ adcq $0, %r12
+ movq %r12, -112(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %r13, %rbx
+ imulq %rax, %rbx
+ movq 40(%rcx), %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r14
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq 32(%rcx), %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r15
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq 24(%rcx), %rdx
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r12
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rdx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rdx
+ movq %rax, %r10
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ movq (%rcx), %rsi
+ movq %rsi, -32(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq %rcx
+ movq %rdx, %r11
+ movq %rax, %rcx
+ movq %rbx, %rax
+ mulq %rsi
+ addq %r13, %rax
+ adcq %rdi, %rcx
+ adcq %rbp, %r10
+ adcq %r8, %r12
+ adcq %r9, %r15
+ adcq -128(%rsp), %r14 ## 8-byte Folded Reload
+ movq -112(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rcx
+ adcq %r11, %r10
+ adcq -8(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, -128(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq -72(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdi
+ movq %rdi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %rdi
+ movq %rdx, %rbp
+ addq %r11, %rbp
+ adcq %r14, %rbx
+ adcq -104(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -96(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r8
+ addq %rcx, %rdi
+ adcq %r10, %rbp
+ adcq %r12, %rbx
+ adcq %r15, %rsi
+ adcq -128(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r8
+ movq %rdi, %r11
+ imulq -48(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %r11, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq %r11, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %rcx
+ movq %r11, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, %r10
+ movq %r11, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r14
+ movq %r11, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ addq %rdi, %rax
+ adcq %rbp, %r14
+ adcq %rbx, %r10
+ adcq %rsi, %rcx
+ adcq %r13, %r15
+ movq -112(%rsp), %rax ## 8-byte Reload
+ adcq %r9, %rax
+ adcq $0, %r8
+ addq %rdx, %r14
+ adcq %r12, %r10
+ adcq -104(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -120(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, -120(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %r8 ## 8-byte Folded Reload
+ movq -72(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdi
+ movq %rdi, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %rbp
+ movq %rdx, %rbx
+ addq %r9, %rbx
+ adcq -8(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -104(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -128(%rsp), %r15 ## 8-byte Folded Reload
+ adcq $0, %r13
+ addq %r14, %rbp
+ adcq %r10, %rbx
+ adcq %rcx, %rsi
+ adcq -120(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r11 ## 8-byte Folded Reload
+ adcq %r8, %r15
+ adcq $0, %r13
+ movq %rbp, %rcx
+ imulq -48(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ addq %rbp, %rax
+ adcq %rbx, %rdi
+ adcq %rsi, %r14
+ adcq %r12, %r10
+ adcq %r11, %r9
+ movq -112(%rsp), %rax ## 8-byte Reload
+ adcq %r15, %rax
+ adcq $0, %r13
+ addq %rdx, %rdi
+ adcq %r8, %r14
+ adcq -104(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %r13 ## 8-byte Folded Reload
+ movq -72(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rbp
+ movq %rbp, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %rbp
+ addq %r12, %rbp
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -96(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -120(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %r15
+ addq %rdi, %r8
+ adcq %r14, %rbp
+ adcq %r10, %rbx
+ adcq %r9, %rsi
+ adcq -112(%rsp), %rcx ## 8-byte Folded Reload
+ adcq %r13, %r11
+ adcq $0, %r15
+ movq %r8, %r14
+ imulq -48(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r9
+ movq %r14, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %r14, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %r10
+ movq %r14, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %r14, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, %rdi
+ movq %r14, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ addq %r8, %rax
+ adcq %rbp, %rdi
+ adcq %rbx, %r12
+ adcq %rsi, %r10
+ adcq %rcx, %r13
+ adcq %r11, %r9
+ adcq $0, %r15
+ addq %rdx, %rdi
+ adcq -104(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, -120(%rsp) ## 8-byte Spill
+ adcq -112(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, -112(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %r15 ## 8-byte Folded Reload
+ movq -72(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rcx
+ movq %rcx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r13
+ movq %rcx, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r11
+ movq %rdx, %rbp
+ addq %r13, %rbp
+ adcq -8(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -104(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -96(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -128(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r14
+ addq %rdi, %r11
+ adcq %r12, %rbp
+ adcq %r10, %rbx
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -112(%rsp), %r8 ## 8-byte Folded Reload
+ adcq %r15, %r9
+ adcq $0, %r14
+ movq %r11, %rcx
+ imulq -48(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %r10
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ addq %r11, %rax
+ adcq %rbp, %rdi
+ adcq %rbx, %r15
+ adcq %rsi, %r10
+ adcq %r8, %r12
+ movq -112(%rsp), %rcx ## 8-byte Reload
+ adcq %r9, %rcx
+ adcq $0, %r14
+ addq %rdx, %rdi
+ adcq %r13, %r15
+ adcq -128(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, -128(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, -120(%rsp) ## 8-byte Spill
+ adcq -104(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %r14 ## 8-byte Folded Reload
+ movq -72(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rcx
+ movq %rcx, %rax
+ mulq -80(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -88(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r9
+ movq %rdx, %r8
+ addq %rsi, %r8
+ adcq %rbp, %r10
+ adcq -88(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -80(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -72(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %rbx
+ addq %rdi, %r9
+ adcq %r15, %r8
+ adcq -128(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r12 ## 8-byte Folded Reload
+ adcq %r14, %r11
+ adcq $0, %rbx
+ movq -48(%rsp), %rcx ## 8-byte Reload
+ imulq %r9, %rcx
+ movq %rcx, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %rcx, %rax
+ movq %rcx, %r15
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rax, %rcx
+ movq %r15, %rax
+ movq 24(%rsp), %r15 ## 8-byte Reload
+ mulq %r15
+ addq %r9, %r14
+ adcq %r8, %rax
+ adcq %r10, %rcx
+ adcq %r13, %rbp
+ adcq %r12, %rdi
+ adcq %r11, %rsi
+ adcq $0, %rbx
+ addq -88(%rsp), %rax ## 8-byte Folded Reload
+ adcq %rdx, %rcx
+ adcq -56(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -80(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -72(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -48(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rax, %r14
+ subq -32(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rcx, %r8
+ sbbq %r15, %r8
+ movq %rbp, %r9
+ sbbq -40(%rsp), %r9 ## 8-byte Folded Reload
+ movq %rdi, %r10
+ sbbq -24(%rsp), %r10 ## 8-byte Folded Reload
+ movq %rsi, %r11
+ sbbq -16(%rsp), %r11 ## 8-byte Folded Reload
+ movq %rbx, %r15
+ sbbq -64(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, %rdx
+ sarq $63, %rdx
+ cmovsq %rax, %r14
+ movq 32(%rsp), %rax ## 8-byte Reload
+ movq %r14, (%rax)
+ cmovsq %rcx, %r8
+ movq %r8, 8(%rax)
+ cmovsq %rbp, %r9
+ movq %r9, 16(%rax)
+ cmovsq %rdi, %r10
+ movq %r10, 24(%rax)
+ cmovsq %rsi, %r11
+ movq %r11, 32(%rax)
+ cmovsq %rbx, %r15
+ movq %r15, 40(%rax)
+ addq $40, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
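+## Note (assumed from mcl naming): mcl_fp_montRed6L appears to perform Montgomery reduction of a
+## 12-limb input, z = xy*R^-1 mod p with R = 2^384; (%rdi)=z, (%rsi)=xy, (%rdx)=p, and -8(%rdx)
+## presumably holds the precomputed -p^-1 mod 2^64.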
+ .globl _mcl_fp_montRed6L
+ .p2align 4, 0x90
+_mcl_fp_montRed6L: ## @mcl_fp_montRed6L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $24, %rsp
+ movq %rdx, %rbp
+ movq %rdi, 16(%rsp) ## 8-byte Spill
+ movq -8(%rbp), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq (%rsi), %r10
+ movq %r10, %rdi
+ imulq %rax, %rdi
+ movq 40(%rbp), %rcx
+ movq %rcx, -24(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rax, %r14
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq 32(%rbp), %rcx
+ movq %rcx, -40(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rax, %r15
+ movq %rdx, %r9
+ movq 24(%rbp), %rcx
+ movq %rcx, -48(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rax, %r12
+ movq %rdx, %r11
+ movq 16(%rbp), %rcx
+ movq %rcx, -56(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rax, %rcx
+ movq %rdx, %r13
+ movq (%rbp), %rbx
+ movq 8(%rbp), %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rdx, %r8
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq %rbx
+ movq %rbx, %rdi
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq %rdx, %rbx
+ addq %rbp, %rbx
+ adcq %rcx, %r8
+ adcq %r12, %r13
+ adcq %r15, %r11
+ adcq %r14, %r9
+ movq -128(%rsp), %rcx ## 8-byte Reload
+ adcq $0, %rcx
+ addq %r10, %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %r8
+ adcq 24(%rsi), %r13
+ adcq 32(%rsi), %r11
+ adcq 40(%rsi), %r9
+ movq %r9, -120(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %rcx
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ movq 88(%rsi), %rax
+ movq 80(%rsi), %rcx
+ movq 72(%rsi), %rdx
+ movq 64(%rsi), %rbp
+ movq 56(%rsi), %rsi
+ adcq $0, %rsi
+ movq %rsi, -104(%rsp) ## 8-byte Spill
+ adcq $0, %rbp
+ movq %rbp, -72(%rsp) ## 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, -64(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ sbbq %r14, %r14
+ andl $1, %r14d
+ movq %rbx, %rsi
+ imulq -80(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r9
+ movq %rsi, %rax
+ mulq %rdi
+ movq %rdx, %rdi
+ addq %r9, %rdi
+ adcq %r10, %rbp
+ adcq 8(%rsp), %rcx ## 8-byte Folded Reload
+ adcq (%rsp), %r12 ## 8-byte Folded Reload
+ adcq -32(%rsp), %r15 ## 8-byte Folded Reload
+ movq -112(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %rbx, %rax
+ adcq %r8, %rdi
+ adcq %r13, %rbp
+ adcq %r11, %rcx
+ adcq -120(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -128(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ adcq $0, -72(%rsp) ## 8-byte Folded Spill
+ adcq $0, -96(%rsp) ## 8-byte Folded Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, -88(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r14
+ movq %rdi, %rbx
+ imulq -80(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ addq %r11, %r10
+ adcq %r9, %r8
+ adcq (%rsp), %rsi ## 8-byte Folded Reload
+ adcq -32(%rsp), %r13 ## 8-byte Folded Reload
+ movq -120(%rsp), %rbx ## 8-byte Reload
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %rdi, %rax
+ adcq %rbp, %r10
+ adcq %rcx, %r8
+ adcq %r12, %rsi
+ adcq %r15, %r13
+ adcq -112(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -120(%rsp) ## 8-byte Spill
+ adcq -72(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq $0, -96(%rsp) ## 8-byte Folded Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, -88(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r14
+ movq %r10, %rcx
+ imulq -80(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %rax
+ movq -24(%rsp), %rbp ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rbx
+ movq %rcx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ addq %r9, %rcx
+ adcq %rbx, %rdi
+ adcq -32(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -72(%rsp), %r11 ## 8-byte Folded Reload
+ movq -112(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r10, %rax
+ adcq %r8, %rcx
+ adcq %rsi, %rdi
+ adcq %r13, %r12
+ adcq -120(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -128(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ movq -88(%rsp), %r8 ## 8-byte Reload
+ adcq $0, %r8
+ adcq $0, %r14
+ movq %rcx, %rsi
+ imulq -80(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq %rbp
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %r10
+ movq %rsi, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ addq %r10, %rbx
+ adcq %rbp, %r9
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ movq -120(%rsp), %rbp ## 8-byte Reload
+ adcq -72(%rsp), %rbp ## 8-byte Folded Reload
+ movq -128(%rsp), %rsi ## 8-byte Reload
+ adcq -88(%rsp), %rsi ## 8-byte Folded Reload
+ movq -96(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %rcx, %rax
+ adcq %rdi, %rbx
+ adcq %r12, %r9
+ adcq %r15, %r13
+ adcq %r11, %rbp
+ movq %rbp, -120(%rsp) ## 8-byte Spill
+ adcq -112(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -128(%rsp) ## 8-byte Spill
+ adcq -64(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, -88(%rsp) ## 8-byte Spill
+ adcq $0, %r14
+ movq -80(%rsp), %r8 ## 8-byte Reload
+ imulq %rbx, %r8
+ movq %r8, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %r8, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %r8, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %r8, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %r8, %rax
+ movq -16(%rsp), %r12 ## 8-byte Reload
+ mulq %r12
+ movq %rdx, %rcx
+ movq %rax, %r15
+ movq %r8, %rax
+ movq -8(%rsp), %r8 ## 8-byte Reload
+ mulq %r8
+ addq %r15, %rdx
+ adcq %r10, %rcx
+ adcq -112(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -64(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -80(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %rbp
+ addq %rbx, %rax
+ adcq %r9, %rdx
+ adcq %r13, %rcx
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -128(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -96(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -88(%rsp), %rbp ## 8-byte Folded Reload
+ adcq $0, %r14
+ movq %rdx, %rax
+ subq %r8, %rax
+ movq %rcx, %rbx
+ sbbq %r12, %rbx
+ movq %rsi, %r8
+ sbbq -56(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rdi, %r9
+ sbbq -48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r11, %r10
+ sbbq -40(%rsp), %r10 ## 8-byte Folded Reload
+ movq %rbp, %r15
+ sbbq -24(%rsp), %r15 ## 8-byte Folded Reload
+ sbbq $0, %r14
+ andl $1, %r14d
+ cmovneq %rbp, %r15
+ testb %r14b, %r14b
+ cmovneq %rdx, %rax
+ movq 16(%rsp), %rdx ## 8-byte Reload
+ movq %rax, (%rdx)
+ cmovneq %rcx, %rbx
+ movq %rbx, 8(%rdx)
+ cmovneq %rsi, %r8
+ movq %r8, 16(%rdx)
+ cmovneq %rdi, %r9
+ movq %r9, 24(%rdx)
+ cmovneq %r11, %r10
+ movq %r10, 32(%rdx)
+ movq %r15, 40(%rdx)
+ addq $24, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
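+## Note: mcl_fp_addPre6L computes the raw 6-limb sum z = x + y with no reduction and returns the
+## final carry in %rax.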
+ .globl _mcl_fp_addPre6L
+ .p2align 4, 0x90
+_mcl_fp_addPre6L: ## @mcl_fp_addPre6L
+## BB#0:
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 40(%rsi), %r11
+ movq 32(%rdx), %r9
+ movq 24(%rdx), %r10
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %r14
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r10, %rax
+ movq %rax, 24(%rdi)
+ adcq %r9, %r14
+ movq %r14, 32(%rdi)
+ adcq %r8, %r11
+ movq %r11, 40(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r14
+ retq
+
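+## Note: mcl_fp_subPre6L computes the raw 6-limb difference z = x - y with no reduction and
+## returns the final borrow in %rax.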
+ .globl _mcl_fp_subPre6L
+ .p2align 4, 0x90
+_mcl_fp_subPre6L: ## @mcl_fp_subPre6L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 40(%rsi), %r9
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %r11
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %rsi
+ movq 24(%rdx), %r14
+ movq 32(%rdx), %r15
+ sbbq 16(%rdx), %rcx
+ movq %rbx, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r14, %r11
+ movq %r11, 24(%rdi)
+ sbbq %r15, %r10
+ movq %r10, 32(%rdi)
+ sbbq %r8, %r9
+ movq %r9, 40(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
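+## Note: mcl_fp_shr1_6L shifts a 6-limb value right by one bit (z = x >> 1), funnelling each bit
+## down through shrdq.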
+ .globl _mcl_fp_shr1_6L
+ .p2align 4, 0x90
+_mcl_fp_shr1_6L: ## @mcl_fp_shr1_6L
+## BB#0:
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %rdx
+ movq 16(%rsi), %rax
+ movq (%rsi), %rcx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rcx
+ movq %rcx, (%rdi)
+ shrdq $1, %rax, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rdx, %rax
+ movq %rax, 16(%rdi)
+ shrdq $1, %r9, %rdx
+ movq %rdx, 24(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 32(%rdi)
+ shrq %r8
+ movq %r8, 40(%rdi)
+ retq
+
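+## Note (assumed from mcl naming): mcl_fp_add6L appears to compute z = (x + y) mod p; the raw sum
+## is stored first, then overwritten on the %nocarry path with sum - p when that subtraction does
+## not borrow.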
+ .globl _mcl_fp_add6L
+ .p2align 4, 0x90
+_mcl_fp_add6L: ## @mcl_fp_add6L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %rbx
+ movq 40(%rdx), %r14
+ movq 40(%rsi), %r8
+ movq 32(%rdx), %r15
+ movq 24(%rdx), %rbx
+ movq 24(%rsi), %r10
+ movq 32(%rsi), %r9
+ movq 16(%rdx), %r11
+ movq (%rdx), %rax
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %r11
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r11, 16(%rdi)
+ adcq %rbx, %r10
+ movq %r10, 24(%rdi)
+ adcq %r15, %r9
+ movq %r9, 32(%rdi)
+ adcq %r14, %r8
+ movq %r8, 40(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %rax
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r11
+ sbbq 24(%rcx), %r10
+ sbbq 32(%rcx), %r9
+ sbbq 40(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB89_2
+## BB#1: ## %nocarry
+ movq %rax, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r11, 16(%rdi)
+ movq %r10, 24(%rdi)
+ movq %r9, 32(%rdi)
+ movq %r8, 40(%rdi)
+LBB89_2: ## %carry
+ popq %rbx
+ popq %r14
+ popq %r15
+ retq
+
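+## Note (assumed from mcl naming): mcl_fp_addNF6L appears to be the branch-free "NF" addition
+## z = (x + y) mod p, presumably for moduli that leave the top bit free (p < 2^383): p is
+## subtracted unconditionally and sign-based cmovs pick either the raw sum or the reduced value.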
+ .globl _mcl_fp_addNF6L
+ .p2align 4, 0x90
+_mcl_fp_addNF6L: ## @mcl_fp_addNF6L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r8
+ movq 32(%rdx), %r9
+ movq 24(%rdx), %r10
+ movq 16(%rdx), %r11
+ movq (%rdx), %r15
+ movq 8(%rdx), %r14
+ addq (%rsi), %r15
+ adcq 8(%rsi), %r14
+ adcq 16(%rsi), %r11
+ adcq 24(%rsi), %r10
+ adcq 32(%rsi), %r9
+ adcq 40(%rsi), %r8
+ movq %r15, %rsi
+ subq (%rcx), %rsi
+ movq %r14, %rbx
+ sbbq 8(%rcx), %rbx
+ movq %r11, %rdx
+ sbbq 16(%rcx), %rdx
+ movq %r10, %r13
+ sbbq 24(%rcx), %r13
+ movq %r9, %r12
+ sbbq 32(%rcx), %r12
+ movq %r8, %rax
+ sbbq 40(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %r15, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r14, %rbx
+ movq %rbx, 8(%rdi)
+ cmovsq %r11, %rdx
+ movq %rdx, 16(%rdi)
+ cmovsq %r10, %r13
+ movq %r13, 24(%rdi)
+ cmovsq %r9, %r12
+ movq %r12, 32(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
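+## Note (assumed from mcl naming): mcl_fp_sub6L appears to compute z = (x - y) mod p; the raw
+## difference is stored, and the %carry block adds p back only when the subtraction borrowed.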
+ .globl _mcl_fp_sub6L
+ .p2align 4, 0x90
+_mcl_fp_sub6L: ## @mcl_fp_sub6L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 40(%rdx), %r14
+ movq 40(%rsi), %r8
+ movq 32(%rsi), %r9
+ movq 24(%rsi), %r10
+ movq 16(%rsi), %r11
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %rsi
+ movq 24(%rdx), %r15
+ movq 32(%rdx), %r12
+ sbbq 16(%rdx), %r11
+ movq %rax, (%rdi)
+ movq %rsi, 8(%rdi)
+ movq %r11, 16(%rdi)
+ sbbq %r15, %r10
+ movq %r10, 24(%rdi)
+ sbbq %r12, %r9
+ movq %r9, 32(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 40(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB91_2
+## BB#1: ## %carry
+ movq 40(%rcx), %r14
+ movq 32(%rcx), %r15
+ movq 24(%rcx), %r12
+ movq 8(%rcx), %rbx
+ movq 16(%rcx), %rdx
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %rsi, %rbx
+ movq %rbx, 8(%rdi)
+ adcq %r11, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r10, %r12
+ movq %r12, 24(%rdi)
+ adcq %r9, %r15
+ movq %r15, 32(%rdi)
+ adcq %r8, %r14
+ movq %r14, 40(%rdi)
+LBB91_2: ## %nocarry
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
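+## Note (assumed from mcl naming): mcl_fp_subNF6L appears to be the branch-free variant of the
+## modular subtraction: the sign of x - y is spread into a mask and (p AND mask) is added back.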
+ .globl _mcl_fp_subNF6L
+ .p2align 4, 0x90
+_mcl_fp_subNF6L: ## @mcl_fp_subNF6L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ movdqu 32(%rdx), %xmm2
+ pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1]
+ movd %xmm3, %r11
+ movdqu (%rsi), %xmm3
+ movdqu 16(%rsi), %xmm4
+ movdqu 32(%rsi), %xmm5
+ pshufd $78, %xmm5, %xmm6 ## xmm6 = xmm5[2,3,0,1]
+ movd %xmm6, %rax
+ movd %xmm2, %r14
+ movd %xmm5, %r8
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r15
+ pshufd $78, %xmm4, %xmm2 ## xmm2 = xmm4[2,3,0,1]
+ movd %xmm2, %r9
+ movd %xmm1, %r12
+ movd %xmm4, %r10
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %rbx
+ pshufd $78, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,0,1]
+ movd %xmm1, %r13
+ movd %xmm0, %rsi
+ movd %xmm3, %rbp
+ subq %rsi, %rbp
+ sbbq %rbx, %r13
+ sbbq %r12, %r10
+ sbbq %r15, %r9
+ sbbq %r14, %r8
+ sbbq %r11, %rax
+ movq %rax, %rsi
+ sarq $63, %rsi
+ movq %rsi, %rbx
+ shldq $1, %rax, %rbx
+ andq (%rcx), %rbx
+ movq 40(%rcx), %r11
+ andq %rsi, %r11
+ movq 32(%rcx), %r14
+ andq %rsi, %r14
+ movq 24(%rcx), %r15
+ andq %rsi, %r15
+ movq 16(%rcx), %rdx
+ andq %rsi, %rdx
+ rolq %rsi
+ andq 8(%rcx), %rsi
+ addq %rbp, %rbx
+ movq %rbx, (%rdi)
+ adcq %r13, %rsi
+ movq %rsi, 8(%rdi)
+ adcq %r10, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r9, %r15
+ movq %r15, 24(%rdi)
+ adcq %r8, %r14
+ movq %r14, 32(%rdi)
+ adcq %rax, %r11
+ movq %r11, 40(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
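+## Note (assumed from mcl naming): mcl_fpDbl_add6L appears to add two 12-limb (double-width)
+## values; the low 6 limbs are stored as-is and the high 6 limbs are conditionally reduced mod p.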
+ .globl _mcl_fpDbl_add6L
+ .p2align 4, 0x90
+_mcl_fpDbl_add6L: ## @mcl_fpDbl_add6L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 88(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 80(%rdx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq 72(%rdx), %r14
+ movq 64(%rdx), %r15
+ movq 24(%rsi), %rbp
+ movq 32(%rsi), %r13
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %rbp
+ adcq 32(%rdx), %r13
+ movq 56(%rdx), %r11
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rdx
+ movq %rbx, (%rdi)
+ movq 88(%rsi), %r8
+ movq %rax, 8(%rdi)
+ movq 80(%rsi), %r10
+ movq %r12, 16(%rdi)
+ movq 72(%rsi), %r12
+ movq %rbp, 24(%rdi)
+ movq 40(%rsi), %rax
+ adcq %rdx, %rax
+ movq 64(%rsi), %rdx
+ movq %r13, 32(%rdi)
+ movq 56(%rsi), %r13
+ movq 48(%rsi), %rbp
+ adcq %r9, %rbp
+ movq %rax, 40(%rdi)
+ adcq %r11, %r13
+ adcq %r15, %rdx
+ adcq %r14, %r12
+ adcq -16(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -8(%rsp), %r8 ## 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rbp, %rsi
+ subq (%rcx), %rsi
+ movq %r13, %rbx
+ sbbq 8(%rcx), %rbx
+ movq %rdx, %r9
+ sbbq 16(%rcx), %r9
+ movq %r12, %r11
+ sbbq 24(%rcx), %r11
+ movq %r10, %r14
+ sbbq 32(%rcx), %r14
+ movq %r8, %r15
+ sbbq 40(%rcx), %r15
+ sbbq $0, %rax
+ andl $1, %eax
+ cmovneq %rbp, %rsi
+ movq %rsi, 48(%rdi)
+ testb %al, %al
+ cmovneq %r13, %rbx
+ movq %rbx, 56(%rdi)
+ cmovneq %rdx, %r9
+ movq %r9, 64(%rdi)
+ cmovneq %r12, %r11
+ movq %r11, 72(%rdi)
+ cmovneq %r10, %r14
+ movq %r14, 80(%rdi)
+ cmovneq %r8, %r15
+ movq %r15, 88(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
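+## Note (assumed from mcl naming): mcl_fpDbl_sub6L appears to subtract two 12-limb values; the
+## low 6 limbs are stored as-is and p is added back into the high 6 limbs when the full
+## subtraction borrowed.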
+ .globl _mcl_fpDbl_sub6L
+ .p2align 4, 0x90
+_mcl_fpDbl_sub6L: ## @mcl_fpDbl_sub6L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 88(%rdx), %r9
+ movq 80(%rdx), %r10
+ movq 72(%rdx), %r14
+ movq 16(%rsi), %r8
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %eax, %eax
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %r8
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 64(%rdx), %r13
+ movq %r15, (%rdi)
+ movq 56(%rdx), %rbp
+ movq %r11, 8(%rdi)
+ movq 48(%rdx), %r15
+ movq 40(%rdx), %rdx
+ movq %r8, 16(%rdi)
+ movq 88(%rsi), %r8
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %rdx, %rbx
+ movq 80(%rsi), %r11
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %rdx
+ sbbq %r15, %rdx
+ movq 72(%rsi), %r15
+ movq %rbx, 40(%rdi)
+ movq 64(%rsi), %r12
+ movq 56(%rsi), %rsi
+ sbbq %rbp, %rsi
+ sbbq %r13, %r12
+ sbbq %r14, %r15
+ sbbq %r10, %r11
+ sbbq %r9, %r8
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%rcx), %r14
+ cmoveq %rax, %r14
+ testb %bpl, %bpl
+ movq 16(%rcx), %r9
+ cmoveq %rax, %r9
+ movq 8(%rcx), %rbp
+ cmoveq %rax, %rbp
+ movq 40(%rcx), %r10
+ cmoveq %rax, %r10
+ movq 32(%rcx), %rbx
+ cmoveq %rax, %rbx
+ cmovneq 24(%rcx), %rax
+ addq %rdx, %r14
+ movq %r14, 48(%rdi)
+ adcq %rsi, %rbp
+ movq %rbp, 56(%rdi)
+ adcq %r12, %r9
+ movq %r9, 64(%rdi)
+ adcq %r15, %rax
+ movq %rax, 72(%rdi)
+ adcq %r11, %rbx
+ movq %rbx, 80(%rdi)
+ adcq %r8, %r10
+ movq %r10, 88(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
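+## Note: mcl_fp_mulUnitPre7L multiplies a 7-limb operand by a single 64-bit word and stores the
+## full 8-limb result, with no reduction.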
+ .globl _mcl_fp_mulUnitPre7L
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre7L: ## @mcl_fp_mulUnitPre7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq 48(%rsi)
+ movq %rdx, %r10
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 40(%rsi)
+ movq %rdx, %r11
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %r15
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r13
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rax, (%rdi)
+ addq %r9, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %rbp, %r8
+ movq %r8, 16(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r14, %r13
+ movq %r13, 32(%rdi)
+ adcq -16(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, 40(%rdi)
+ adcq -8(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 48(%rdi)
+ adcq $0, %r10
+ movq %r10, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
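+## Note (assumed from mcl naming): mcl_fpDbl_mulPre7L appears to compute the full 14-limb
+## schoolbook product z = x*y of two 7-limb operands, with no modular reduction.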
+ .globl _mcl_fpDbl_mulPre7L
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre7L: ## @mcl_fpDbl_mulPre7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $16, %rsp
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rsi, %r9
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq (%r9), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq (%rdx), %rsi
+ mulq %rsi
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq 32(%r9), %rbp
+ movq %rbp, -88(%rsp) ## 8-byte Spill
+ movq 40(%r9), %rcx
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ movq 48(%r9), %r14
+ movq %rax, (%rdi)
+ movq %r14, %rax
+ mulq %rsi
+ movq %rdx, %rdi
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq %rsi
+ movq %rdx, %rcx
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rsi
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdx, %rbp
+ movq 24(%r9), %r8
+ movq %r8, %rax
+ mulq %rsi
+ movq %rax, %r15
+ movq %rdx, %rbx
+ movq 16(%r9), %rax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulq %rsi
+ movq %rax, %r13
+ movq %rdx, %r12
+ movq 8(%r9), %r11
+ movq %r11, %rax
+ mulq %rsi
+ movq %rdx, %rsi
+ movq %rax, %r10
+ addq -120(%rsp), %r10 ## 8-byte Folded Reload
+ adcq %r13, %rsi
+ adcq %r15, %r12
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -96(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, -72(%rsp) ## 8-byte Spill
+ adcq -80(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -104(%rsp) ## 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -96(%rsp) ## 8-byte Spill
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rcx
+ movq %r14, %rax
+ mulq %rcx
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq -128(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq -88(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq %r8, %rax
+ mulq %rcx
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq -112(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ movq %r11, %rax
+ mulq %rcx
+ movq %rdx, %r11
+ movq %rax, %rdi
+ movq -64(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ addq %r10, %rax
+ movq -8(%rsp), %r10 ## 8-byte Reload
+ movq %rax, 8(%r10)
+ adcq %rsi, %rdi
+ adcq %r12, %rbp
+ adcq %rbx, %r14
+ adcq -72(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ movq -80(%rsp), %rax ## 8-byte Reload
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq %rdx, %rdi
+ adcq %r11, %rbp
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ adcq %r8, %r15
+ adcq -88(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ movq 48(%r9), %rdx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rcx
+ movq %rdx, %rax
+ mulq %rcx
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq 40(%r9), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq 32(%r9), %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %r12
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq 24(%r9), %rax
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %rbx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq 16(%r9), %rax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %r8
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq 8(%r9), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %r11
+ movq %rdx, (%rsp) ## 8-byte Spill
+ movq (%r9), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ mulq %rcx
+ addq %rdi, %rax
+ movq %rax, 16(%r10)
+ adcq %rbp, %r11
+ adcq %r14, %r8
+ adcq %r15, %rbx
+ adcq %r13, %r12
+ movq -128(%rsp), %rdi ## 8-byte Reload
+ adcq -80(%rsp), %rdi ## 8-byte Folded Reload
+ movq -120(%rsp), %rax ## 8-byte Reload
+ adcq %rsi, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %rdx, %r11
+ adcq (%rsp), %r8 ## 8-byte Folded Reload
+ adcq 8(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -48(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -40(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -128(%rsp) ## 8-byte Spill
+ adcq -32(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ adcq -104(%rsp), %rcx ## 8-byte Folded Reload
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rbp
+ movq -64(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq -88(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq -96(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq -72(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq -112(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, %rdi
+ movq -24(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, %r14
+ movq %rax, %r10
+ movq -16(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ addq %r11, %rax
+ movq -8(%rsp), %rsi ## 8-byte Reload
+ movq %rax, 24(%rsi)
+ adcq %r8, %r10
+ adcq %rbx, %rdi
+ adcq %r12, %r15
+ adcq -128(%rsp), %r13 ## 8-byte Folded Reload
+ movq -64(%rsp), %rbp ## 8-byte Reload
+ adcq -120(%rsp), %rbp ## 8-byte Folded Reload
+ movq -80(%rsp), %rax ## 8-byte Reload
+ adcq %rcx, %rax
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq %rdx, %r10
+ adcq %r14, %rdi
+ adcq -112(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -72(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, -64(%rsp) ## 8-byte Spill
+ adcq -88(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ adcq -104(%rsp), %rsi ## 8-byte Folded Reload
+ movq 48(%r9), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq -56(%rsp), %rbx ## 8-byte Reload
+ movq 32(%rbx), %rcx
+ mulq %rcx
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq 40(%r9), %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq 32(%r9), %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %r12
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq 24(%r9), %rax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %rbp
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq 16(%r9), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %r14
+ movq %rdx, (%rsp) ## 8-byte Spill
+ movq 8(%r9), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulq %rcx
+ movq %rax, %r11
+ movq %rdx, %r8
+ movq (%r9), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ mulq %rcx
+ addq %r10, %rax
+ movq -8(%rsp), %rcx ## 8-byte Reload
+ movq %rax, 32(%rcx)
+ adcq %rdi, %r11
+ adcq %r15, %r14
+ adcq %r13, %rbp
+ adcq -64(%rsp), %r12 ## 8-byte Folded Reload
+ movq -128(%rsp), %rcx ## 8-byte Reload
+ adcq -80(%rsp), %rcx ## 8-byte Folded Reload
+ movq -120(%rsp), %rax ## 8-byte Reload
+ adcq %rsi, %rax
+ sbbq %r13, %r13
+ andl $1, %r13d
+ addq %rdx, %r11
+ adcq %r8, %r14
+ adcq (%rsp), %rbp ## 8-byte Folded Reload
+ adcq 8(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -48(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ adcq -40(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ adcq -72(%rsp), %r13 ## 8-byte Folded Reload
+ movq 40(%rbx), %rcx
+ movq -88(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %rdi
+ movq -96(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rax, %r10
+ movq -104(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq -112(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, %rbx
+ movq -16(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, %rsi
+ movq -32(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq -24(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ addq %r11, %rax
+ movq -8(%rsp), %rcx ## 8-byte Reload
+ movq %rax, 40(%rcx)
+ adcq %r14, %r8
+ adcq %rbp, %rsi
+ adcq %r12, %rbx
+ adcq -128(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r10 ## 8-byte Folded Reload
+ adcq %r13, %rdi
+ movq -56(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %r11
+ sbbq %rcx, %rcx
+ movq %r11, %rax
+ mulq 48(%r9)
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %r11, %rax
+ mulq 40(%r9)
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %r11, %rax
+ mulq 32(%r9)
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r13
+ movq %r11, %rax
+ mulq 24(%r9)
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ movq %r11, %rax
+ mulq 16(%r9)
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %r11, %rax
+ mulq 8(%r9)
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %r11, %rax
+ mulq (%r9)
+ andl $1, %ecx
+ addq -40(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -16(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -96(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -64(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -80(%rsp), %rcx ## 8-byte Folded Reload
+ addq %rax, %r8
+ movq -8(%rsp), %r9 ## 8-byte Reload
+ movq %r8, 48(%r9)
+ adcq %r12, %rsi
+ adcq %r14, %rbx
+ adcq %rbp, %r15
+ adcq %r13, %r10
+ adcq -32(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -112(%rsp), %rcx ## 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ addq %rdx, %rsi
+ adcq -48(%rsp), %rbx ## 8-byte Folded Reload
+ movq %r9, %rdx
+ movq %rsi, 56(%rdx)
+ movq %rbx, 64(%rdx)
+ adcq -24(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, 72(%rdx)
+ adcq -72(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 80(%rdx)
+ adcq -128(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 88(%rdx)
+ adcq -120(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 96(%rdx)
+ adcq -56(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 104(%rdx)
+ addq $16, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
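+## Note (assumed from mcl naming): mcl_fpDbl_sqrPre7L appears to compute the full 14-limb square
+## z = x^2 of a 7-limb operand, with no modular reduction.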
+ .globl _mcl_fpDbl_sqrPre7L
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre7L: ## @mcl_fpDbl_sqrPre7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $16, %rsp
+ movq %rsi, %r9
+ movq %rdi, -24(%rsp) ## 8-byte Spill
+ movq 24(%r9), %r10
+ movq %r10, -128(%rsp) ## 8-byte Spill
+ movq 32(%r9), %r14
+ movq %r14, -88(%rsp) ## 8-byte Spill
+ movq 40(%r9), %rsi
+ movq %rsi, -80(%rsp) ## 8-byte Spill
+ movq 48(%r9), %rbp
+ movq %rbp, -120(%rsp) ## 8-byte Spill
+ movq (%r9), %rbx
+ movq %rbx, %rax
+ mulq %rbx
+ movq %rdx, %rcx
+ movq %rax, (%rdi)
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, %r11
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq %rbx
+ movq %rdx, %r8
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %r14, %rax
+ mulq %rbx
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %r10, %rax
+ mulq %rbx
+ movq %rax, %r14
+ movq %rdx, %rdi
+ movq 16(%r9), %r15
+ movq %r15, %rax
+ mulq %rbx
+ movq %rax, %r10
+ movq %rdx, %r12
+ movq 8(%r9), %rbp
+ movq %rbp, %rax
+ mulq %rbx
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ addq %rax, %rcx
+ adcq %rdx, %r10
+ adcq %r14, %r12
+ adcq %rsi, %rdi
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, -104(%rsp) ## 8-byte Spill
+ adcq $0, %r11
+ movq %r11, -96(%rsp) ## 8-byte Spill
+ movq -120(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq -80(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %rsi
+ movq -88(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r11
+ movq -128(%rsp), %rax ## 8-byte Reload
+ mulq %rbp
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %r15, %rax
+ mulq %rbp
+ movq %rdx, %r15
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq %rbp
+ movq %rax, %rbp
+ addq -72(%rsp), %rcx ## 8-byte Folded Reload
+ movq -24(%rsp), %rax ## 8-byte Reload
+ movq %rcx, 8(%rax)
+ adcq %r10, %rbp
+ adcq %r12, %rbx
+ adcq %rdi, %r14
+ adcq %r13, %r11
+ movq %rsi, %rax
+ adcq -104(%rsp), %rax ## 8-byte Folded Reload
+ adcq -96(%rsp), %r8 ## 8-byte Folded Reload
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq -112(%rsp), %rbp ## 8-byte Folded Reload
+ adcq %rdx, %rbx
+ adcq %r15, %r14
+ adcq -128(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -88(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ adcq -80(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, -40(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ movq 48(%r9), %rax
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq 16(%r9), %rdi
+ mulq %rdi
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq 40(%r9), %rax
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq 32(%r9), %rax
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r13
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq 24(%r9), %rcx
+ movq %rcx, %rax
+ mulq %rdi
+ movq %rax, %r10
+ movq %r10, -8(%rsp) ## 8-byte Spill
+ movq %rdx, %r12
+ movq %r12, -72(%rsp) ## 8-byte Spill
+ movq 8(%r9), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r15
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq (%r9), %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq %rdi, %rax
+ mulq %rdi
+ movq %rax, %rdi
+ addq %rbp, %r8
+ movq -24(%rsp), %rax ## 8-byte Reload
+ movq %r8, 16(%rax)
+ adcq %rbx, %r15
+ adcq %r14, %rdi
+ adcq %r10, %r11
+ adcq -48(%rsp), %r13 ## 8-byte Folded Reload
+ movq -56(%rsp), %r10 ## 8-byte Reload
+ adcq -40(%rsp), %r10 ## 8-byte Folded Reload
+ movq -120(%rsp), %rax ## 8-byte Reload
+ adcq %rsi, %rax
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ addq -16(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -64(%rsp), %rdi ## 8-byte Folded Reload
+ adcq %rdx, %r11
+ adcq %r12, %r13
+ adcq -32(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rbp ## 8-byte Folded Reload
+ movq -112(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq -80(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq -88(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq (%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq -104(%rsp), %rax ## 8-byte Reload
+ mulq %rcx
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rax, %rbx
+ movq %rcx, %rax
+ mulq %rcx
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ addq %r15, %rbx
+ movq -24(%rsp), %rcx ## 8-byte Reload
+ movq %rbx, 24(%rcx)
+ adcq %rdi, %r12
+ adcq -8(%rsp), %r11 ## 8-byte Folded Reload
+ adcq %r13, %rax
+ movq %rax, %r15
+ movq %r8, %rsi
+ adcq %r10, %rsi
+ movq -112(%rsp), %rbx ## 8-byte Reload
+ adcq -120(%rsp), %rbx ## 8-byte Folded Reload
+ adcq %rbp, %r14
+ sbbq %r8, %r8
+ movq 8(%r9), %rcx
+ movq 40(%r9), %r13
+ movq %rcx, %rax
+ mulq %r13
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq (%r9), %rbp
+ movq %rbp, %rax
+ mulq %r13
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq 32(%r9), %rdi
+ movq %rcx, %rax
+ mulq %rdi
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, %rbp
+ movq %rdx, (%rsp) ## 8-byte Spill
+ andl $1, %r8d
+ addq -64(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -48(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -72(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, -64(%rsp) ## 8-byte Spill
+ adcq -56(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -56(%rsp) ## 8-byte Spill
+ adcq -40(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -112(%rsp) ## 8-byte Spill
+ adcq -32(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -128(%rsp), %r8 ## 8-byte Folded Reload
+ movq 48(%r9), %rax
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %rcx
+ movq %r13, %rax
+ mulq %rdi
+ movq %rax, %rsi
+ movq %rsi, -48(%rsp) ## 8-byte Spill
+ movq %rdx, %rbx
+ movq 24(%r9), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r15
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq 16(%r9), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq %rdi
+ movq %rax, %rdi
+ addq %rbp, %r12
+ movq -24(%rsp), %rbp ## 8-byte Reload
+ movq %r12, 32(%rbp)
+ adcq -8(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -64(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -56(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rdi ## 8-byte Folded Reload
+ adcq %rsi, %r14
+ adcq %r8, %rcx
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ addq (%rsp), %r11 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r10 ## 8-byte Folded Reload
+ adcq 8(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -16(%rsp), %rdi ## 8-byte Folded Reload
+ adcq %rdx, %r14
+ adcq %rbx, %rcx
+ adcq -72(%rsp), %rsi ## 8-byte Folded Reload
+ movq -128(%rsp), %rax ## 8-byte Reload
+ mulq %r13
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq -32(%rsp), %rax ## 8-byte Reload
+ mulq %r13
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq -40(%rsp), %rax ## 8-byte Reload
+ mulq %r13
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %r13, %rax
+ mulq %r13
+ movq %rax, %r13
+ addq -104(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 40(%rbp)
+ adcq -96(%rsp), %r10 ## 8-byte Folded Reload
+ adcq %r15, %r12
+ adcq %rdi, %r8
+ movq %r14, %rax
+ adcq -48(%rsp), %rax ## 8-byte Folded Reload
+ adcq %rcx, %r13
+ movq -120(%rsp), %rcx ## 8-byte Reload
+ adcq %rsi, %rcx
+ sbbq %r14, %r14
+ andl $1, %r14d
+ addq -88(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -80(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -72(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, -104(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ adcq %rbx, %r13
+ adcq %rdx, %rcx
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ movq 48(%r9), %rcx
+ movq %rcx, %rax
+ mulq 40(%r9)
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq 32(%r9)
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %rbx
+ movq %rcx, %rax
+ mulq 24(%r9)
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq 16(%r9)
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r11
+ movq %rcx, %rax
+ mulq 8(%r9)
+ movq %rdx, %r15
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq (%r9)
+ movq %rdx, %r9
+ movq %rax, %rsi
+ movq %rcx, %rax
+ mulq %rcx
+ addq %r10, %rsi
+ movq -24(%rsp), %r10 ## 8-byte Reload
+ movq %rsi, 48(%r10)
+ adcq %r12, %rdi
+ adcq -104(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rbp ## 8-byte Folded Reload
+ adcq %r13, %rbx
+ adcq -120(%rsp), %r8 ## 8-byte Folded Reload
+ adcq %r14, %rax
+ sbbq %rcx, %rcx
+ andl $1, %ecx
+ addq %r9, %rdi
+ adcq %r15, %r11
+ movq %r10, %rsi
+ movq %rdi, 56(%rsi)
+ movq %r11, 64(%rsi)
+ adcq -128(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 72(%rsi)
+ adcq -88(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 80(%rsi)
+ adcq -80(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 88(%rsi)
+ adcq -112(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 96(%rsi)
+ adcq %rdx, %rcx
+ movq %rcx, 104(%rsi)
+ addq $16, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
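+## The routine below, mcl_fp_mont7L(z, x, y, p), appears to be a word-serial
+## Montgomery multiplication over a 7-limb (448-bit) modulus: for each 64-bit
+## word of y it accumulates x*y[i], multiplies the running low word by p'
+## (read from -8(%rcx), just below the modulus) and folds in that multiple of
+## p so the low limb cancels; the cmovne sequence at the end performs the
+## usual conditional final subtraction so the result lands in [0, p).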
+ .globl _mcl_fp_mont7L
+ .p2align 4, 0x90
+_mcl_fp_mont7L: ## @mcl_fp_mont7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $88, %rsp
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rdi, 80(%rsp) ## 8-byte Spill
+ movq 48(%rsi), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdi
+ mulq %rdi
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq %rdx, %r12
+ movq 40(%rsi), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq %rdx, %r8
+ movq 32(%rsi), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rdx, %r9
+ movq 24(%rsi), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r14
+ movq %rdx, %r11
+ movq 16(%rsi), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rax, %r15
+ movq %rdx, %rbx
+ movq (%rsi), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ mulq %rdi
+ movq %rdx, %r13
+ movq %rax, %rsi
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rdx, %r10
+ addq %rsi, %r10
+ adcq %r15, %r13
+ adcq %r14, %rbx
+ movq %rbx, -72(%rsp) ## 8-byte Spill
+ adcq -8(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, -56(%rsp) ## 8-byte Spill
+ adcq (%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, -112(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, -104(%rsp) ## 8-byte Spill
+ adcq $0, %r12
+ movq %r12, -96(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, 40(%rsp) ## 8-byte Spill
+ movq %rax, %rdi
+ imulq %rdx, %rdi
+ movq 48(%rcx), %rdx
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq 40(%rcx), %rdx
+ movq %rdx, (%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq 32(%rcx), %rdx
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rax, %r14
+ movq %rdx, %r9
+ movq 24(%rcx), %rdx
+ movq %rdx, 64(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rax, %r8
+ movq %rdx, %rbx
+ movq 16(%rcx), %rdx
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rdx
+ movq %rax, %r15
+ movq %rdx, %rbp
+ movq (%rcx), %rsi
+ movq %rsi, 48(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq %rcx
+ movq %rdx, %rcx
+ movq %rax, %r12
+ movq %rdi, %rax
+ mulq %rsi
+ movq %rdx, %r11
+ addq %r12, %r11
+ adcq %r15, %rcx
+ adcq %r8, %rbp
+ adcq %r14, %rbx
+ adcq -64(%rsp), %r9 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq -88(%rsp), %rdx ## 8-byte Folded Reload
+ movq -120(%rsp), %rdi ## 8-byte Reload
+ adcq $0, %rdi
+ addq -80(%rsp), %rax ## 8-byte Folded Reload
+ adcq %r10, %r11
+ adcq %r13, %rcx
+ adcq -72(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -56(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -112(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, -56(%rsp) ## 8-byte Spill
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -120(%rsp) ## 8-byte Spill
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ movq -16(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdi
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r12
+ movq %rdi, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r8
+ movq %rdx, %r14
+ addq %r9, %r14
+ adcq %r12, %r13
+ adcq -64(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r10 ## 8-byte Folded Reload
+ movq -112(%rsp), %rdi ## 8-byte Reload
+ adcq -80(%rsp), %rdi ## 8-byte Folded Reload
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ adcq -72(%rsp), %rdx ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %r11, %r8
+ adcq %rcx, %r14
+ adcq %rbp, %r13
+ adcq %rbx, %r15
+ adcq -56(%rsp), %r10 ## 8-byte Folded Reload
+ adcq -128(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -112(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ adcq %rsi, %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %r8, %rcx
+ imulq 40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rbx
+ movq %rcx, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq 72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ addq %rbp, %rcx
+ adcq %rdi, %rsi
+ adcq %rbx, %r9
+ adcq -88(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -80(%rsp), %r11 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdi ## 8-byte Reload
+ adcq -72(%rsp), %rdi ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r8, %rax
+ adcq %r14, %rcx
+ adcq %r13, %rsi
+ adcq %r15, %r9
+ adcq %r10, %r12
+ adcq -112(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -128(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ adcq $0, -56(%rsp) ## 8-byte Folded Spill
+ movq -16(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rbx
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r14
+ movq %rdx, %r10
+ addq %r15, %r10
+ adcq %r8, %rdi
+ adcq -64(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -88(%rsp), %r13 ## 8-byte Folded Reload
+ movq -112(%rsp), %rbx ## 8-byte Reload
+ adcq -80(%rsp), %rbx ## 8-byte Folded Reload
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ adcq -72(%rsp), %rdx ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rcx, %r14
+ adcq %rsi, %r10
+ adcq %r9, %rdi
+ adcq %r12, %rbp
+ adcq %r11, %r13
+ adcq -128(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -112(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ adcq -56(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %r14, %rbx
+ imulq 40(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq 72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r12
+ movq %rbx, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ addq %r12, %r11
+ adcq %r15, %r8
+ adcq -64(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -88(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -80(%rsp), %r9 ## 8-byte Folded Reload
+ movq -128(%rsp), %rbx ## 8-byte Reload
+ adcq -72(%rsp), %rbx ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r14, %rax
+ adcq %r10, %r11
+ adcq %rdi, %r8
+ adcq %rbp, %rsi
+ adcq %r13, %rcx
+ adcq -112(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -128(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ adcq $0, -56(%rsp) ## 8-byte Folded Spill
+ movq -16(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rbx
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r13
+ addq %r15, %r13
+ adcq %r14, %rdi
+ adcq -64(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -88(%rsp), %r12 ## 8-byte Folded Reload
+ movq -112(%rsp), %rbx ## 8-byte Reload
+ adcq -80(%rsp), %rbx ## 8-byte Folded Reload
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ adcq -72(%rsp), %rdx ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %r11, %r10
+ adcq %r8, %r13
+ adcq %rsi, %rdi
+ adcq %rcx, %rbp
+ adcq %r9, %r12
+ adcq -128(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -112(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ adcq -56(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %r10, %rbx
+ imulq 40(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq 72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ addq %r15, %r11
+ adcq %r14, %r8
+ adcq -64(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -88(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -80(%rsp), %r9 ## 8-byte Folded Reload
+ movq -128(%rsp), %rbx ## 8-byte Reload
+ adcq -72(%rsp), %rbx ## 8-byte Folded Reload
+ movq -120(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r10, %rax
+ adcq %r13, %r11
+ adcq %rdi, %r8
+ adcq %rbp, %rsi
+ adcq %r12, %rcx
+ adcq -112(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -128(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ adcq $0, -56(%rsp) ## 8-byte Folded Spill
+ movq -16(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rbx
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r14
+ movq %rbx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r12
+ addq %r14, %r12
+ adcq %r13, %rdi
+ adcq -64(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -88(%rsp), %r15 ## 8-byte Folded Reload
+ movq -112(%rsp), %rbx ## 8-byte Reload
+ adcq -80(%rsp), %rbx ## 8-byte Folded Reload
+ movq -104(%rsp), %rdx ## 8-byte Reload
+ adcq -72(%rsp), %rdx ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %r11, %r10
+ adcq %r8, %r12
+ adcq %rsi, %rdi
+ adcq %rcx, %rbp
+ adcq %r9, %r15
+ adcq -128(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -112(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ adcq -56(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %r10, %rcx
+ imulq 40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ addq %r8, %r11
+ adcq %r14, %rbx
+ adcq -64(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -88(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -80(%rsp), %r13 ## 8-byte Folded Reload
+ movq -56(%rsp), %rdx ## 8-byte Reload
+ adcq -72(%rsp), %rdx ## 8-byte Folded Reload
+ movq -128(%rsp), %rcx ## 8-byte Reload
+ adcq $0, %rcx
+ addq %r10, %rax
+ adcq %r12, %r11
+ adcq %rdi, %rbx
+ adcq %rbp, %rsi
+ adcq %r15, %r9
+ adcq -112(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ movq -120(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ movq -16(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rcx
+ movq %rcx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r8
+ addq %r14, %r8
+ adcq %r12, %rdi
+ adcq -64(%rsp), %rbp ## 8-byte Folded Reload
+ movq -120(%rsp), %r14 ## 8-byte Reload
+ adcq -88(%rsp), %r14 ## 8-byte Folded Reload
+ movq -112(%rsp), %rdx ## 8-byte Reload
+ adcq -80(%rsp), %rdx ## 8-byte Folded Reload
+ movq -104(%rsp), %rcx ## 8-byte Reload
+ adcq -72(%rsp), %rcx ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %r11, %r10
+ adcq %rbx, %r8
+ adcq %rsi, %rdi
+ adcq %r9, %rbp
+ adcq %r13, %r14
+ movq %r14, -120(%rsp) ## 8-byte Spill
+ adcq -56(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -104(%rsp) ## 8-byte Spill
+ adcq %r15, %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %r10, %rcx
+ imulq 40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r13
+ movq %rcx, %rax
+ mulq 72(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r9
+ movq %rcx, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ addq %r9, %r11
+ adcq %r13, %rbx
+ adcq -64(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -80(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -72(%rsp), %r14 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r10, %rax
+ adcq %r8, %r11
+ adcq %rdi, %rbx
+ adcq %rbp, %r15
+ adcq -120(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -112(%rsp) ## 8-byte Spill
+ adcq -104(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, -104(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq -56(%rsp), %r8 ## 8-byte Reload
+ adcq $0, %r8
+ movq -16(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rcx
+ movq %rcx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rdi
+ movq %rcx, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rbp
+ movq %rcx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rax, %rsi
+ movq %rdx, %r10
+ addq %rbp, %r10
+ adcq %rdi, %r14
+ adcq -48(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -40(%rsp), %r9 ## 8-byte Folded Reload
+ movq -32(%rsp), %rcx ## 8-byte Reload
+ adcq -120(%rsp), %rcx ## 8-byte Folded Reload
+ movq -24(%rsp), %rax ## 8-byte Reload
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ movq -16(%rsp), %rdi ## 8-byte Reload
+ adcq $0, %rdi
+ addq %r11, %rsi
+ movq %rsi, -48(%rsp) ## 8-byte Spill
+ adcq %rbx, %r10
+ adcq %r15, %r14
+ adcq %r12, %r13
+ adcq -112(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, -40(%rsp) ## 8-byte Spill
+ adcq -104(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -32(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ adcq %r8, %rdi
+ movq %rdi, -16(%rsp) ## 8-byte Spill
+ sbbq %rcx, %rcx
+ movq 40(%rsp), %r8 ## 8-byte Reload
+ imulq %rsi, %r8
+ movq %r8, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq %r8, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq %r8, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq %r8, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq %r8, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r11
+ movq %r8, %rax
+ movq %r8, %r12
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, %r8
+ movq %r12, %rax
+ movq 72(%rsp), %r12 ## 8-byte Reload
+ mulq %r12
+ andl $1, %ecx
+ addq %r15, %rax
+ adcq %r11, %rdx
+ adcq 16(%rsp), %rbp ## 8-byte Folded Reload
+ adcq 24(%rsp), %rbx ## 8-byte Folded Reload
+ adcq 32(%rsp), %rsi ## 8-byte Folded Reload
+ adcq 40(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %rdi
+ addq -48(%rsp), %r8 ## 8-byte Folded Reload
+ adcq %r10, %rax
+ adcq %r14, %rdx
+ adcq %r13, %rbp
+ adcq -40(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -32(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -24(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -16(%rsp), %rdi ## 8-byte Folded Reload
+ adcq $0, %rcx
+ movq %rax, %r8
+ subq 48(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rdx, %r10
+ sbbq %r12, %r10
+ movq %rbp, %r11
+ sbbq 56(%rsp), %r11 ## 8-byte Folded Reload
+ movq %rbx, %r14
+ sbbq 64(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rsi, %r15
+ sbbq -8(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r9, %r12
+ sbbq (%rsp), %r12 ## 8-byte Folded Reload
+ movq %rdi, %r13
+ sbbq 8(%rsp), %r13 ## 8-byte Folded Reload
+ sbbq $0, %rcx
+ andl $1, %ecx
+ cmovneq %rdi, %r13
+ testb %cl, %cl
+ cmovneq %rax, %r8
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq %r8, (%rax)
+ cmovneq %rdx, %r10
+ movq %r10, 8(%rax)
+ cmovneq %rbp, %r11
+ movq %r11, 16(%rax)
+ cmovneq %rbx, %r14
+ movq %r14, 24(%rax)
+ cmovneq %rsi, %r15
+ movq %r15, 32(%rax)
+ cmovneq %r9, %r12
+ movq %r12, 40(%rax)
+ movq %r13, 48(%rax)
+ addq $88, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
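+## mcl_fp_montNF7L looks like the same 7-limb Montgomery multiplication in its
+## "NF" form: rather than masking on a borrow flag, it subtracts p once and
+## then selects between the two candidates with sarq $63 / cmovsq on the sign
+## of the top word.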
+ .globl _mcl_fp_montNF7L
+ .p2align 4, 0x90
+_mcl_fp_montNF7L: ## @mcl_fp_montNF7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $80, %rsp
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rdi, 72(%rsp) ## 8-byte Spill
+ movq 48(%rsi), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq (%rdx), %rbx
+ mulq %rbx
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq %rdx, %r12
+ movq 40(%rsi), %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ mulq %rbx
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq %rdx, %r8
+ movq 32(%rsi), %rax
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ mulq %rbx
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq %rdx, %rbp
+ movq 24(%rsi), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ mulq %rbx
+ movq %rax, %r10
+ movq %rdx, %r15
+ movq 16(%rsi), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ mulq %rbx
+ movq %rax, %r9
+ movq %rdx, %r14
+ movq (%rsi), %rdi
+ movq %rdi, -8(%rsp) ## 8-byte Spill
+ movq 8(%rsi), %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ mulq %rbx
+ movq %rdx, %r13
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq %rbx
+ movq %rdx, %rsi
+ addq %r11, %rsi
+ adcq %r9, %r13
+ adcq %r10, %r14
+ adcq -32(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -24(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, -128(%rsp) ## 8-byte Spill
+ adcq -16(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, -120(%rsp) ## 8-byte Spill
+ adcq $0, %r12
+ movq %r12, -104(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rdx
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ movq %rax, %r10
+ movq %rax, %r8
+ imulq %rdx, %r10
+ movq 48(%rcx), %rdx
+ movq %rdx, 32(%rsp) ## 8-byte Spill
+ movq %r10, %rax
+ mulq %rdx
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq 40(%rcx), %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %r10, %rax
+ mulq %rdx
+ movq %rax, %r11
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq 32(%rcx), %rdx
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movq %r10, %rax
+ mulq %rdx
+ movq %rax, %rbp
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq 24(%rcx), %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq %r10, %rax
+ mulq %rdx
+ movq %rax, %r12
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rdx
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ movq %r10, %rax
+ mulq %rdx
+ movq %rax, %rbx
+ movq %rdx, 24(%rsp) ## 8-byte Spill
+ movq (%rcx), %rdi
+ movq %rdi, 40(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq %r10, %rax
+ mulq %rcx
+ movq %rdx, %r9
+ movq %rax, %rcx
+ movq %r10, %rax
+ mulq %rdi
+ addq %r8, %rax
+ adcq %rsi, %rcx
+ adcq %r13, %rbx
+ adcq %r14, %r12
+ adcq %r15, %rbp
+ adcq -128(%rsp), %r11 ## 8-byte Folded Reload
+ movq -112(%rsp), %rdi ## 8-byte Reload
+ adcq -120(%rsp), %rdi ## 8-byte Folded Reload
+ movq -104(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rcx
+ adcq %r9, %rbx
+ adcq 24(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -88(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -80(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, -120(%rsp) ## 8-byte Spill
+ adcq -72(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -112(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq -40(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rsi
+ movq %rsi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %rdi
+ movq %rsi, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r15
+ addq %r11, %r15
+ adcq %rdi, %r8
+ adcq 24(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -88(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -80(%rsp), %r14 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq -72(%rsp), %rdx ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rcx, %r10
+ adcq %rbx, %r15
+ adcq %r12, %r8
+ adcq %rbp, %r9
+ adcq -120(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r14 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %r10, %rsi
+ imulq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %rbx
+ movq %rsi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r11
+ movq %rsi, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r12
+ movq %rsi, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %rbp
+ movq %rsi, %rax
+ mulq 40(%rsp) ## 8-byte Folded Reload
+ addq %r10, %rax
+ adcq %r15, %rbp
+ adcq %r8, %r12
+ adcq %r9, %r11
+ adcq %r13, %rbx
+ movq -120(%rsp), %r8 ## 8-byte Reload
+ adcq %r14, %r8
+ movq -112(%rsp), %rsi ## 8-byte Reload
+ adcq -128(%rsp), %rsi ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rbp
+ adcq %rdi, %r12
+ adcq %rcx, %r11
+ adcq -88(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -80(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, -120(%rsp) ## 8-byte Spill
+ adcq -72(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -112(%rsp) ## 8-byte Spill
+ adcq -104(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq -40(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdi
+ movq %rdi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -104(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %r15
+ addq %r13, %r15
+ adcq %r14, %rcx
+ adcq 24(%rsp), %r8 ## 8-byte Folded Reload
+ adcq -88(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -80(%rsp), %r9 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq -72(%rsp), %rdx ## 8-byte Folded Reload
+ movq -104(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rbp, %r10
+ adcq %r12, %r15
+ adcq %r11, %rcx
+ adcq %rbx, %r8
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -112(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -96(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %r10, %rdi
+ imulq 16(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r11
+ movq %rdi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %rdi, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq 40(%rsp) ## 8-byte Folded Reload
+ addq %r10, %rax
+ adcq %r15, %rbx
+ adcq %rcx, %rbp
+ adcq %r8, %r12
+ adcq %rsi, %r11
+ movq -112(%rsp), %rcx ## 8-byte Reload
+ adcq %r9, %rcx
+ movq -96(%rsp), %rsi ## 8-byte Reload
+ adcq -128(%rsp), %rsi ## 8-byte Folded Reload
+ movq -104(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rbx
+ adcq %r14, %rbp
+ adcq %r13, %r12
+ adcq -120(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, -120(%rsp) ## 8-byte Spill
+ adcq -88(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ adcq -80(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -96(%rsp) ## 8-byte Spill
+ adcq -72(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq -40(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdi
+ movq %rdi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r13
+ movq %rdi, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r10
+ movq %rdx, %rdi
+ addq %r14, %rdi
+ adcq %r13, %r8
+ adcq -88(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -80(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -72(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -128(%rsp), %r11 ## 8-byte Folded Reload
+ adcq $0, %r9
+ addq %rbx, %r10
+ adcq %rbp, %rdi
+ adcq %r12, %r8
+ adcq -120(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -112(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -96(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, -112(%rsp) ## 8-byte Spill
+ adcq $0, %r9
+ movq %r10, %rbp
+ imulq 16(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r11
+ movq %rbp, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq 40(%rsp) ## 8-byte Folded Reload
+ addq %r10, %rax
+ adcq %rdi, %rbx
+ adcq %r8, %r11
+ adcq %rcx, %r12
+ adcq %rsi, %r14
+ movq -104(%rsp), %rcx ## 8-byte Reload
+ adcq %r15, %rcx
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq -112(%rsp), %rax ## 8-byte Folded Reload
+ adcq $0, %r9
+ addq %rdx, %rbx
+ adcq %r13, %r11
+ adcq -88(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -80(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, -112(%rsp) ## 8-byte Spill
+ adcq -72(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -104(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ adcq -120(%rsp), %r9 ## 8-byte Folded Reload
+ movq -40(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdi
+ movq %rdi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %rdi
+ movq %rdx, %r13
+ addq %r14, %r13
+ adcq %rbp, %r8
+ adcq -88(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -80(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -72(%rsp), %r10 ## 8-byte Folded Reload
+ movq -120(%rsp), %rax ## 8-byte Reload
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ adcq $0, %r15
+ addq %rbx, %rdi
+ adcq %r11, %r13
+ adcq %r12, %r8
+ adcq -112(%rsp), %rcx ## 8-byte Folded Reload
+ adcq -104(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -96(%rsp), %r10 ## 8-byte Folded Reload
+ adcq %r9, %rax
+ movq %rax, -120(%rsp) ## 8-byte Spill
+ adcq $0, %r15
+ movq %rdi, %rbp
+ imulq 16(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, %r12
+ movq %rbp, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq 40(%rsp) ## 8-byte Folded Reload
+ addq %rdi, %rax
+ adcq %r13, %rbx
+ adcq %r8, %r14
+ adcq %rcx, %r12
+ adcq %rsi, %r9
+ movq -112(%rsp), %rcx ## 8-byte Reload
+ adcq %r10, %rcx
+ movq -104(%rsp), %rax ## 8-byte Reload
+ adcq -120(%rsp), %rax ## 8-byte Folded Reload
+ adcq $0, %r15
+ addq %rdx, %rbx
+ adcq %r11, %r14
+ adcq -88(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -128(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, -128(%rsp) ## 8-byte Spill
+ adcq -80(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -112(%rsp) ## 8-byte Spill
+ adcq -72(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %r15 ## 8-byte Folded Reload
+ movq -40(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rbp
+ movq %rbp, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %rcx
+ movq %rbp, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r8
+ movq %rax, %r9
+ movq %rbp, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r11
+ movq %rdx, %r10
+ addq %r9, %r10
+ adcq %rcx, %r8
+ adcq 24(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -88(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -80(%rsp), %r13 ## 8-byte Folded Reload
+ movq -120(%rsp), %rcx ## 8-byte Reload
+ adcq -72(%rsp), %rcx ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rbx, %r11
+ adcq %r14, %r10
+ adcq %r12, %r8
+ adcq -128(%rsp), %rdi ## 8-byte Folded Reload
+ adcq -112(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -104(%rsp), %r13 ## 8-byte Folded Reload
+ adcq %r15, %rcx
+ movq %rcx, -120(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %r11, %rbx
+ imulq 16(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ movq %rax, -112(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, %r9
+ movq %rbx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq %rbx, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, %rbp
+ movq %rbx, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rcx
+ movq %rbx, %rax
+ mulq 40(%rsp) ## 8-byte Folded Reload
+ addq %r11, %rax
+ adcq %r10, %rcx
+ adcq %r8, %rbp
+ adcq %rdi, %r15
+ adcq %rsi, %r9
+ movq -112(%rsp), %rbx ## 8-byte Reload
+ adcq %r13, %rbx
+ movq -104(%rsp), %rsi ## 8-byte Reload
+ adcq -120(%rsp), %rsi ## 8-byte Folded Reload
+ movq -96(%rsp), %rax ## 8-byte Reload
+ adcq $0, %rax
+ addq %rdx, %rcx
+ adcq %r12, %rbp
+ adcq %r14, %r15
+ adcq -88(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, -120(%rsp) ## 8-byte Spill
+ adcq -80(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -112(%rsp) ## 8-byte Spill
+ adcq -72(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -104(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq -40(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdi
+ movq %rdi, %rax
+ mulq -48(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ movq %rax, -64(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq 64(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %rsi
+ movq %rdi, %rax
+ mulq -8(%rsp) ## 8-byte Folded Reload
+ movq %rax, %r12
+ movq %rdx, %r8
+ addq %rsi, %r8
+ adcq %rbx, %r10
+ adcq %r9, %r11
+ adcq -64(%rsp), %r13 ## 8-byte Folded Reload
+ movq -48(%rsp), %rdx ## 8-byte Reload
+ adcq -56(%rsp), %rdx ## 8-byte Folded Reload
+ movq -40(%rsp), %rax ## 8-byte Reload
+ adcq -128(%rsp), %rax ## 8-byte Folded Reload
+ adcq $0, %r14
+ addq %rcx, %r12
+ adcq %rbp, %r8
+ adcq %r15, %r10
+ adcq -120(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -112(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -48(%rsp) ## 8-byte Spill
+ adcq -96(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ adcq $0, %r14
+ movq 16(%rsp), %rdi ## 8-byte Reload
+ imulq %r12, %rdi
+ movq %rdi, %rax
+ mulq 32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ movq %rax, %r9
+ movq %rdi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -56(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rax, %rsi
+ movq %rdi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq %rax, %rcx
+ movq %rdi, %rax
+ mulq 40(%rsp) ## 8-byte Folded Reload
+ movq %rdx, (%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq 56(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -8(%rsp) ## 8-byte Spill
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq 48(%rsp) ## 8-byte Folded Reload
+ addq %r12, %r15
+ adcq %r8, %rax
+ adcq %r10, %rbx
+ adcq %r11, %rcx
+ adcq %r13, %rsi
+ adcq -48(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -40(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %r14
+ addq (%rsp), %rax ## 8-byte Folded Reload
+ adcq %rdx, %rbx
+ adcq -8(%rsp), %rcx ## 8-byte Folded Reload
+ adcq 8(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -64(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -56(%rsp), %r9 ## 8-byte Folded Reload
+ adcq 16(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rax, %r13
+ subq 40(%rsp), %r13 ## 8-byte Folded Reload
+ movq %rbx, %r12
+ sbbq 48(%rsp), %r12 ## 8-byte Folded Reload
+ movq %rcx, %r8
+ sbbq 56(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rsi, %r10
+ sbbq -32(%rsp), %r10 ## 8-byte Folded Reload
+ movq %rbp, %r11
+ sbbq -24(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r9, %r15
+ sbbq -16(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r14, %rdx
+ sbbq 32(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ sarq $63, %rdi
+ cmovsq %rax, %r13
+ movq 72(%rsp), %rax ## 8-byte Reload
+ movq %r13, (%rax)
+ cmovsq %rbx, %r12
+ movq %r12, 8(%rax)
+ cmovsq %rcx, %r8
+ movq %r8, 16(%rax)
+ cmovsq %rsi, %r10
+ movq %r10, 24(%rax)
+ cmovsq %rbp, %r11
+ movq %r11, 32(%rax)
+ cmovsq %r9, %r15
+ movq %r15, 40(%rax)
+ cmovsq %r14, %rdx
+ movq %rdx, 48(%rax)
+ addq $80, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
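+## mcl_fp_montRed7L appears to be the standalone Montgomery reduction: it
+## takes a 14-limb value at (%rsi), runs seven rounds of multiplying the
+## current low word by p' (read from -8(%rcx), just below the modulus) and
+## adding that multiple of p, then conditionally subtracts p once before
+## storing the 7-limb result.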
+ .globl _mcl_fp_montRed7L
+ .p2align 4, 0x90
+_mcl_fp_montRed7L: ## @mcl_fp_montRed7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $72, %rsp
+ movq %rdx, %rcx
+ movq %rdi, 64(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rax
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq (%rsi), %rbp
+ movq %rbp, -48(%rsp) ## 8-byte Spill
+ imulq %rax, %rbp
+ movq 48(%rcx), %rdx
+ movq %rdx, (%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq 40(%rcx), %rdx
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rdx, %r15
+ movq 32(%rcx), %rdx
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r14
+ movq %rdx, %r11
+ movq 24(%rcx), %rdx
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r13
+ movq %rdx, %r10
+ movq 16(%rcx), %rdx
+ movq %rdx, -16(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rdx
+ movq %rax, %r9
+ movq %rdx, %r12
+ movq (%rcx), %rdi
+ movq %rdi, 24(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rcx
+ movq %rcx, -24(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %rcx
+ movq %rdx, %rcx
+ movq %rax, %rbx
+ movq %rbp, %rax
+ mulq %rdi
+ movq %rdx, %r8
+ addq %rbx, %r8
+ adcq %r9, %rcx
+ adcq %r13, %r12
+ adcq %r14, %r10
+ adcq -72(%rsp), %r11 ## 8-byte Folded Reload
+ adcq -104(%rsp), %r15 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq -48(%rsp), %rax ## 8-byte Folded Reload
+ adcq 8(%rsi), %r8
+ adcq 16(%rsi), %rcx
+ adcq 24(%rsi), %r12
+ adcq 32(%rsi), %r10
+ movq %r10, 40(%rsp) ## 8-byte Spill
+ adcq 40(%rsi), %r11
+ movq %r11, -40(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %r15
+ movq %r15, -96(%rsp) ## 8-byte Spill
+ adcq 56(%rsi), %rdx
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq 104(%rsi), %rax
+ movq 96(%rsi), %rdx
+ movq 88(%rsi), %rdi
+ movq 80(%rsi), %rbp
+ movq 72(%rsi), %rbx
+ movq 64(%rsi), %r9
+ adcq $0, %r9
+ adcq $0, %rbx
+ movq %rbx, -8(%rsp) ## 8-byte Spill
+ adcq $0, %rbp
+ movq %rbp, -80(%rsp) ## 8-byte Spill
+ adcq $0, %rdi
+ movq %rdi, -64(%rsp) ## 8-byte Spill
+ adcq $0, %rdx
+ movq %rdx, -72(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq %r8, %rdi
+ imulq -56(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ mulq %r13
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, 56(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %rsi
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %r15
+ movq %rdi, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r14
+ addq %r15, %r14
+ adcq %rsi, %r11
+ adcq %r10, %rbp
+ adcq 56(%rsp), %rbx ## 8-byte Folded Reload
+ movq -88(%rsp), %rdi ## 8-byte Reload
+ adcq 48(%rsp), %rdi ## 8-byte Folded Reload
+ movq -120(%rsp), %rsi ## 8-byte Reload
+ adcq 32(%rsp), %rsi ## 8-byte Folded Reload
+ movq -112(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r8, %rax
+ adcq %rcx, %r14
+ adcq %r12, %r11
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ adcq -40(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -96(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -88(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, -120(%rsp) ## 8-byte Spill
+ adcq %r9, %rdx
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ adcq $0, -8(%rsp) ## 8-byte Folded Spill
+ adcq $0, -80(%rsp) ## 8-byte Folded Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, -72(%rsp) ## 8-byte Folded Spill
+ adcq $0, -104(%rsp) ## 8-byte Folded Spill
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ movq %r14, %rcx
+ imulq -56(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ movq 8(%rsp), %r15 ## 8-byte Reload
+ mulq %r15
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq %r13
+ movq %rdx, -40(%rsp) ## 8-byte Spill
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ movq %rax, %r12
+ movq %rcx, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, %r13
+ movq %rcx, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ addq %r13, %r10
+ adcq %r12, %r9
+ adcq %r8, %rdi
+ adcq 48(%rsp), %rsi ## 8-byte Folded Reload
+ movq -40(%rsp), %r8 ## 8-byte Reload
+ adcq 32(%rsp), %r8 ## 8-byte Folded Reload
+ movq -96(%rsp), %rdx ## 8-byte Reload
+ adcq 40(%rsp), %rdx ## 8-byte Folded Reload
+ movq -128(%rsp), %rcx ## 8-byte Reload
+ adcq $0, %rcx
+ addq %r14, %rax
+ adcq %r11, %r10
+ adcq %rbp, %r9
+ adcq %rbx, %rdi
+ adcq -88(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -120(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, -40(%rsp) ## 8-byte Spill
+ adcq -112(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -96(%rsp) ## 8-byte Spill
+ adcq -8(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -128(%rsp) ## 8-byte Spill
+ adcq $0, -80(%rsp) ## 8-byte Folded Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, -72(%rsp) ## 8-byte Folded Spill
+ adcq $0, -104(%rsp) ## 8-byte Folded Spill
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ movq %r10, %rbp
+ imulq -56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, %rax
+ movq (%rsp), %r8 ## 8-byte Reload
+ mulq %r8
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq %r15
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -88(%rsp) ## 8-byte Spill
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %r13
+ movq %rbp, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, %r14
+ movq %rbp, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, %r15
+ movq %rbp, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ addq %r15, %r11
+ adcq %r14, %rbx
+ adcq %r13, %rcx
+ adcq 32(%rsp), %r12 ## 8-byte Folded Reload
+ movq -88(%rsp), %r14 ## 8-byte Reload
+ adcq 40(%rsp), %r14 ## 8-byte Folded Reload
+ movq -120(%rsp), %rbp ## 8-byte Reload
+ adcq -8(%rsp), %rbp ## 8-byte Folded Reload
+ movq -112(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r10, %rax
+ adcq %r9, %r11
+ adcq %rdi, %rbx
+ adcq %rsi, %rcx
+ adcq -40(%rsp), %r12 ## 8-byte Folded Reload
+ adcq -96(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, -88(%rsp) ## 8-byte Spill
+ adcq -128(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, -120(%rsp) ## 8-byte Spill
+ adcq -80(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ adcq $0, -64(%rsp) ## 8-byte Folded Spill
+ adcq $0, -72(%rsp) ## 8-byte Folded Spill
+ adcq $0, -104(%rsp) ## 8-byte Folded Spill
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ movq %r11, %rdi
+ imulq -56(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq %r8
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -128(%rsp) ## 8-byte Spill
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r13
+ movq %rax, %r14
+ movq %rdi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r10
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %rdi, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ addq %r8, %r9
+ adcq %r10, %rbp
+ adcq %r14, %rsi
+ adcq -8(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -40(%rsp), %r15 ## 8-byte Folded Reload
+ movq -128(%rsp), %rdi ## 8-byte Reload
+ adcq -96(%rsp), %rdi ## 8-byte Folded Reload
+ movq -80(%rsp), %rdx ## 8-byte Reload
+ adcq $0, %rdx
+ addq %r11, %rax
+ adcq %rbx, %r9
+ adcq %rcx, %rbp
+ adcq %r12, %rsi
+ adcq -88(%rsp), %r13 ## 8-byte Folded Reload
+ adcq -120(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -112(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, -128(%rsp) ## 8-byte Spill
+ adcq -64(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -80(%rsp) ## 8-byte Spill
+ adcq $0, -72(%rsp) ## 8-byte Folded Spill
+ movq -104(%rsp), %r14 ## 8-byte Reload
+ adcq $0, %r14
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ movq %r9, %rdi
+ imulq -56(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, -64(%rsp) ## 8-byte Spill
+ movq %rax, -104(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ movq %rax, -88(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, -120(%rsp) ## 8-byte Spill
+ movq %rax, -96(%rsp) ## 8-byte Spill
+ movq %rdi, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r12
+ movq %rax, %rbx
+ movq %rdi, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r11
+ movq %rax, %rcx
+ movq %rdi, %rax
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r10
+ movq %rax, %r8
+ movq %rdi, %rax
+ mulq 24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rdi
+ addq %r8, %rdi
+ adcq %rcx, %r10
+ adcq %rbx, %r11
+ adcq -96(%rsp), %r12 ## 8-byte Folded Reload
+ movq -120(%rsp), %rbx ## 8-byte Reload
+ adcq -88(%rsp), %rbx ## 8-byte Folded Reload
+ movq -112(%rsp), %rdx ## 8-byte Reload
+ adcq -104(%rsp), %rdx ## 8-byte Folded Reload
+ movq -64(%rsp), %rcx ## 8-byte Reload
+ adcq $0, %rcx
+ addq %r9, %rax
+ adcq %rbp, %rdi
+ adcq %rsi, %r10
+ adcq %r13, %r11
+ adcq %r15, %r12
+ adcq -128(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -120(%rsp) ## 8-byte Spill
+ adcq -80(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -112(%rsp) ## 8-byte Spill
+ adcq -72(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -64(%rsp) ## 8-byte Spill
+ adcq $0, %r14
+ movq %r14, -104(%rsp) ## 8-byte Spill
+ adcq $0, -48(%rsp) ## 8-byte Folded Spill
+ movq -56(%rsp), %rbp ## 8-byte Reload
+ imulq %rdi, %rbp
+ movq %rbp, %rax
+ mulq (%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rcx
+ movq %rax, -56(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq 8(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r9
+ movq %rax, -72(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq 16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %r15
+ movq %rax, -80(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -32(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbx
+ movq %rax, -128(%rsp) ## 8-byte Spill
+ movq %rbp, %rax
+ mulq -16(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rsi
+ movq %rax, %r13
+ movq %rbp, %rax
+ movq %rbp, %r14
+ mulq -24(%rsp) ## 8-byte Folded Reload
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %r14, %rax
+ movq 24(%rsp), %r14 ## 8-byte Reload
+ mulq %r14
+ addq %r8, %rdx
+ adcq %r13, %rbp
+ adcq -128(%rsp), %rsi ## 8-byte Folded Reload
+ adcq -80(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -72(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -56(%rsp), %r9 ## 8-byte Folded Reload
+ adcq $0, %rcx
+ addq %rdi, %rax
+ adcq %r10, %rdx
+ adcq %r11, %rbp
+ adcq %r12, %rsi
+ adcq -120(%rsp), %rbx ## 8-byte Folded Reload
+ adcq -112(%rsp), %r15 ## 8-byte Folded Reload
+ adcq -64(%rsp), %r9 ## 8-byte Folded Reload
+ adcq -104(%rsp), %rcx ## 8-byte Folded Reload
+ movq -48(%rsp), %rdi ## 8-byte Reload
+ adcq $0, %rdi
+ movq %rdx, %rax
+ subq %r14, %rax
+ movq %rbp, %r13
+ sbbq -24(%rsp), %r13 ## 8-byte Folded Reload
+ movq %rsi, %r8
+ sbbq -16(%rsp), %r8 ## 8-byte Folded Reload
+ movq %rbx, %r10
+ sbbq -32(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r15, %r11
+ sbbq 16(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r9, %r14
+ sbbq 8(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rcx, %r12
+ sbbq (%rsp), %r12 ## 8-byte Folded Reload
+ sbbq $0, %rdi
+ andl $1, %edi
+ cmovneq %rcx, %r12
+ testb %dil, %dil
+ cmovneq %rdx, %rax
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ movq %rax, (%rcx)
+ cmovneq %rbp, %r13
+ movq %r13, 8(%rcx)
+ cmovneq %rsi, %r8
+ movq %r8, 16(%rcx)
+ cmovneq %rbx, %r10
+ movq %r10, 24(%rcx)
+ cmovneq %r15, %r11
+ movq %r11, 32(%rcx)
+ cmovneq %r9, %r14
+ movq %r14, 40(%rcx)
+ movq %r12, 48(%rcx)
+ addq $72, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre7L
+ .p2align 4, 0x90
+_mcl_fp_addPre7L: ## @mcl_fp_addPre7L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r8
+ movq 48(%rsi), %r14
+ movq 40(%rdx), %r9
+ movq 40(%rsi), %r15
+ movq 32(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r12
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rbx
+ adcq 16(%rsi), %r12
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r12, 16(%rdi)
+ adcq %r11, %rax
+ movq %rax, 24(%rdi)
+ adcq %r10, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r9, %r15
+ movq %r15, 40(%rdi)
+ adcq %r8, %r14
+ movq %r14, 48(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_subPre7L
+ .p2align 4, 0x90
+_mcl_fp_subPre7L: ## @mcl_fp_subPre7L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r8
+ movq 48(%rsi), %r10
+ movq 40(%rdx), %r9
+ movq 40(%rsi), %r15
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %r12
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %r12
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq 32(%rsi), %rdx
+ movq 24(%rsi), %rsi
+ movq %rbx, (%rdi)
+ movq %r12, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ sbbq %r14, %rdx
+ movq %rdx, 32(%rdi)
+ sbbq %r9, %r15
+ movq %r15, 40(%rdi)
+ sbbq %r8, %r10
+ movq %r10, 48(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_shr1_7L
+ .p2align 4, 0x90
+_mcl_fp_shr1_7L: ## @mcl_fp_shr1_7L
+## BB#0:
+ movq 48(%rsi), %r8
+ movq 40(%rsi), %r9
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %rax
+ movq 16(%rsi), %rcx
+ movq (%rsi), %rdx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rdx
+ movq %rdx, (%rdi)
+ shrdq $1, %rcx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rax, %rcx
+ movq %rcx, 16(%rdi)
+ shrdq $1, %r10, %rax
+ movq %rax, 24(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 32(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 40(%rdi)
+ shrq %r8
+ movq %r8, 48(%rdi)
+ retq
+
+ .globl _mcl_fp_add7L
+ .p2align 4, 0x90
+_mcl_fp_add7L: ## @mcl_fp_add7L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r14
+ movq 48(%rsi), %r8
+ movq 40(%rdx), %r15
+ movq 40(%rsi), %r9
+ movq 32(%rdx), %r12
+ movq 24(%rdx), %r13
+ movq 16(%rdx), %r10
+ movq (%rdx), %r11
+ movq 8(%rdx), %rdx
+ addq (%rsi), %r11
+ adcq 8(%rsi), %rdx
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rbx
+ adcq 16(%rsi), %r10
+ movq %r11, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ adcq %r13, %rax
+ movq %rax, 24(%rdi)
+ adcq %r12, %rbx
+ movq %rbx, 32(%rdi)
+ adcq %r15, %r9
+ movq %r9, 40(%rdi)
+ adcq %r14, %r8
+ movq %r8, 48(%rdi)
+ sbbq %rsi, %rsi
+ andl $1, %esi
+ subq (%rcx), %r11
+ sbbq 8(%rcx), %rdx
+ sbbq 16(%rcx), %r10
+ sbbq 24(%rcx), %rax
+ sbbq 32(%rcx), %rbx
+ sbbq 40(%rcx), %r9
+ sbbq 48(%rcx), %r8
+ sbbq $0, %rsi
+ testb $1, %sil
+ jne LBB104_2
+## BB#1: ## %nocarry
+ movq %r11, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %rax, 24(%rdi)
+ movq %rbx, 32(%rdi)
+ movq %r9, 40(%rdi)
+ movq %r8, 48(%rdi)
+LBB104_2: ## %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_addNF7L
+ .p2align 4, 0x90
+_mcl_fp_addNF7L: ## @mcl_fp_addNF7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rbp
+ movq 32(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r14
+ movq (%rdx), %r12
+ movq 8(%rdx), %r15
+ addq (%rsi), %r12
+ adcq 8(%rsi), %r15
+ adcq 16(%rsi), %r14
+ adcq 24(%rsi), %r11
+ adcq 32(%rsi), %r10
+ adcq 40(%rsi), %rbp
+ movq %rbp, -8(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %r9
+ movq %r12, %rsi
+ subq (%rcx), %rsi
+ movq %r15, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r14, %rax
+ sbbq 16(%rcx), %rax
+ movq %r11, %rbx
+ sbbq 24(%rcx), %rbx
+ movq %r10, %r13
+ sbbq 32(%rcx), %r13
+ sbbq 40(%rcx), %rbp
+ movq %r9, %r8
+ sbbq 48(%rcx), %r8
+ movq %r8, %rcx
+ sarq $63, %rcx
+ cmovsq %r12, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r15, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r14, %rax
+ movq %rax, 16(%rdi)
+ cmovsq %r11, %rbx
+ movq %rbx, 24(%rdi)
+ cmovsq %r10, %r13
+ movq %r13, 32(%rdi)
+ cmovsq -8(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 40(%rdi)
+ cmovsq %r9, %r8
+ movq %r8, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_sub7L
+ .p2align 4, 0x90
+_mcl_fp_sub7L: ## @mcl_fp_sub7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 48(%rdx), %r14
+ movq 48(%rsi), %r8
+ movq 40(%rdx), %r15
+ movq 40(%rsi), %r9
+ movq 32(%rdx), %r12
+ movq (%rsi), %rax
+ movq 8(%rsi), %r11
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r11
+ movq 16(%rsi), %r13
+ sbbq 16(%rdx), %r13
+ movq 32(%rsi), %r10
+ movq 24(%rsi), %rsi
+ sbbq 24(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r11, 8(%rdi)
+ movq %r13, 16(%rdi)
+ movq %rsi, 24(%rdi)
+ sbbq %r12, %r10
+ movq %r10, 32(%rdi)
+ sbbq %r15, %r9
+ movq %r9, 40(%rdi)
+ sbbq %r14, %r8
+ movq %r8, 48(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB106_2
+## BB#1: ## %carry
+ movq 48(%rcx), %r14
+ movq 40(%rcx), %r15
+ movq 32(%rcx), %r12
+ movq 24(%rcx), %rbx
+ movq 8(%rcx), %rdx
+ movq 16(%rcx), %rbp
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ adcq %r11, %rdx
+ movq %rdx, 8(%rdi)
+ adcq %r13, %rbp
+ movq %rbp, 16(%rdi)
+ adcq %rsi, %rbx
+ movq %rbx, 24(%rdi)
+ adcq %r10, %r12
+ movq %r12, 32(%rdi)
+ adcq %r9, %r15
+ movq %r15, 40(%rdi)
+ adcq %r8, %r14
+ movq %r14, 48(%rdi)
+LBB106_2: ## %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_subNF7L
+ .p2align 4, 0x90
+_mcl_fp_subNF7L: ## @mcl_fp_subNF7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 48(%rsi), %r11
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ movdqu 32(%rdx), %xmm2
+ pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1]
+ movd %xmm3, %r14
+ movdqu (%rsi), %xmm3
+ movdqu 16(%rsi), %xmm4
+ movdqu 32(%rsi), %xmm5
+ pshufd $78, %xmm5, %xmm6 ## xmm6 = xmm5[2,3,0,1]
+ movd %xmm6, %rcx
+ movd %xmm2, %r15
+ movd %xmm5, %r9
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r12
+ pshufd $78, %xmm4, %xmm2 ## xmm2 = xmm4[2,3,0,1]
+ movd %xmm2, %r10
+ movd %xmm1, %r13
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %rax
+ pshufd $78, %xmm3, %xmm1 ## xmm1 = xmm3[2,3,0,1]
+ movd %xmm0, %rbx
+ movd %xmm3, %rsi
+ subq %rbx, %rsi
+ movd %xmm1, %rbx
+ sbbq %rax, %rbx
+ movd %xmm4, %rbp
+ sbbq %r13, %rbp
+ sbbq %r12, %r10
+ sbbq %r15, %r9
+ sbbq %r14, %rcx
+ movq %rcx, -8(%rsp) ## 8-byte Spill
+ sbbq 48(%rdx), %r11
+ movq %r11, %rax
+ sarq $63, %rax
+ movq %rax, %rdx
+ shldq $1, %r11, %rdx
+ andq (%r8), %rdx
+ movq 48(%r8), %r14
+ andq %rax, %r14
+ movq 40(%r8), %r15
+ andq %rax, %r15
+ movq 32(%r8), %r12
+ andq %rax, %r12
+ movq 24(%r8), %r13
+ andq %rax, %r13
+ movq 16(%r8), %rcx
+ andq %rax, %rcx
+ andq 8(%r8), %rax
+ addq %rsi, %rdx
+ adcq %rbx, %rax
+ movq %rdx, (%rdi)
+ movq %rax, 8(%rdi)
+ adcq %rbp, %rcx
+ movq %rcx, 16(%rdi)
+ adcq %r10, %r13
+ movq %r13, 24(%rdi)
+ adcq %r9, %r12
+ movq %r12, 32(%rdi)
+ adcq -8(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, 40(%rdi)
+ adcq %r11, %r14
+ movq %r14, 48(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_add7L
+ .p2align 4, 0x90
+_mcl_fpDbl_add7L: ## @mcl_fpDbl_add7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 104(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 96(%rdx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq 88(%rdx), %r11
+ movq 80(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r12
+ movq 16(%rdx), %r9
+ movq (%rdx), %rax
+ movq 8(%rdx), %rbx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %r9
+ adcq 24(%rdx), %r15
+ adcq 32(%rdx), %r12
+ movq 72(%rdx), %r13
+ movq 64(%rdx), %rbp
+ movq %rax, (%rdi)
+ movq 56(%rdx), %r10
+ movq %rbx, 8(%rdi)
+ movq 48(%rdx), %rcx
+ movq 40(%rdx), %rdx
+ movq %r9, 16(%rdi)
+ movq 104(%rsi), %r9
+ movq %r15, 24(%rdi)
+ movq 40(%rsi), %rbx
+ adcq %rdx, %rbx
+ movq 96(%rsi), %r15
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %rdx
+ adcq %rcx, %rdx
+ movq 88(%rsi), %rax
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rcx
+ adcq %r10, %rcx
+ movq 80(%rsi), %r12
+ movq %rdx, 48(%rdi)
+ movq 72(%rsi), %rdx
+ movq 64(%rsi), %rsi
+ adcq %rbp, %rsi
+ adcq %r13, %rdx
+ adcq %r14, %r12
+ adcq %r11, %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ adcq -24(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, -24(%rsp) ## 8-byte Spill
+ adcq -8(%rsp), %r9 ## 8-byte Folded Reload
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq %rcx, %rbx
+ subq (%r8), %rbx
+ movq %rsi, %r10
+ sbbq 8(%r8), %r10
+ movq %rdx, %r11
+ sbbq 16(%r8), %r11
+ movq %r12, %r14
+ sbbq 24(%r8), %r14
+ movq -16(%rsp), %r13 ## 8-byte Reload
+ sbbq 32(%r8), %r13
+ sbbq 40(%r8), %r15
+ movq %r9, %rax
+ sbbq 48(%r8), %rax
+ sbbq $0, %rbp
+ andl $1, %ebp
+ cmovneq %rcx, %rbx
+ movq %rbx, 56(%rdi)
+ testb %bpl, %bpl
+ cmovneq %rsi, %r10
+ movq %r10, 64(%rdi)
+ cmovneq %rdx, %r11
+ movq %r11, 72(%rdi)
+ cmovneq %r12, %r14
+ movq %r14, 80(%rdi)
+ cmovneq -16(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 88(%rdi)
+ cmovneq -24(%rsp), %r15 ## 8-byte Folded Reload
+ movq %r15, 96(%rdi)
+ cmovneq %r9, %rax
+ movq %rax, 104(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sub7L
+ .p2align 4, 0x90
+_mcl_fpDbl_sub7L: ## @mcl_fpDbl_sub7L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 104(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 96(%rdx), %r10
+ movq 88(%rdx), %r14
+ movq 16(%rsi), %rax
+ movq (%rsi), %r15
+ movq 8(%rsi), %r11
+ xorl %ecx, %ecx
+ subq (%rdx), %r15
+ sbbq 8(%rdx), %r11
+ sbbq 16(%rdx), %rax
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 80(%rdx), %r13
+ movq 72(%rdx), %rbp
+ movq %r15, (%rdi)
+ movq 64(%rdx), %r9
+ movq %r11, 8(%rdi)
+ movq 56(%rdx), %r15
+ movq %rax, 16(%rdi)
+ movq 48(%rdx), %r11
+ movq 40(%rdx), %rdx
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %rdx, %rbx
+ movq 104(%rsi), %rax
+ movq %r12, 32(%rdi)
+ movq 48(%rsi), %r12
+ sbbq %r11, %r12
+ movq 96(%rsi), %r11
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rdx
+ sbbq %r15, %rdx
+ movq 88(%rsi), %r15
+ movq %r12, 48(%rdi)
+ movq 64(%rsi), %rbx
+ sbbq %r9, %rbx
+ movq 80(%rsi), %r12
+ movq 72(%rsi), %r9
+ sbbq %rbp, %r9
+ sbbq %r13, %r12
+ sbbq %r14, %r15
+ sbbq %r10, %r11
+ sbbq -8(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%r8), %r10
+ cmoveq %rcx, %r10
+ testb %bpl, %bpl
+ movq 16(%r8), %rbp
+ cmoveq %rcx, %rbp
+ movq 8(%r8), %rsi
+ cmoveq %rcx, %rsi
+ movq 48(%r8), %r14
+ cmoveq %rcx, %r14
+ movq 40(%r8), %r13
+ cmoveq %rcx, %r13
+ movq 32(%r8), %rax
+ cmoveq %rcx, %rax
+ cmovneq 24(%r8), %rcx
+ addq %rdx, %r10
+ adcq %rbx, %rsi
+ movq %r10, 56(%rdi)
+ movq %rsi, 64(%rdi)
+ adcq %r9, %rbp
+ movq %rbp, 72(%rdi)
+ adcq %r12, %rcx
+ movq %rcx, 80(%rdi)
+ adcq %r15, %rax
+ movq %rax, 88(%rdi)
+ adcq %r11, %r13
+ movq %r13, 96(%rdi)
+ adcq -8(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, 104(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .p2align 4, 0x90
+l_mulPv512x64: ## @mulPv512x64
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rcx
+ movq %rcx, %rax
+ mulq (%rsi)
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movq %rax, (%rdi)
+ movq %rcx, %rax
+ mulq 56(%rsi)
+ movq %rdx, %r10
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 48(%rsi)
+ movq %rdx, %r11
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq %rcx, %rax
+ mulq 40(%rsi)
+ movq %rdx, %r12
+ movq %rax, %r15
+ movq %rcx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %rbx
+ movq %rax, %r13
+ movq %rcx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %rcx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r9
+ movq %rax, %r14
+ movq %rcx, %rax
+ mulq 8(%rsi)
+ addq -24(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 8(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r8, %r9
+ movq %r9, 24(%rdi)
+ adcq %r13, %rbp
+ movq %rbp, 32(%rdi)
+ adcq %r15, %rbx
+ movq %rbx, 40(%rdi)
+ adcq -16(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, 48(%rdi)
+ adcq -8(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 56(%rdi)
+ adcq $0, %r10
+ movq %r10, 64(%rdi)
+ movq %rdi, %rax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mulUnitPre8L
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre8L: ## @mcl_fp_mulUnitPre8L
+## BB#0:
+ pushq %rbx
+ subq $80, %rsp
+ movq %rdi, %rbx
+ leaq 8(%rsp), %rdi
+ callq l_mulPv512x64
+ movq 72(%rsp), %r8
+ movq 64(%rsp), %r9
+ movq 56(%rsp), %r10
+ movq 48(%rsp), %r11
+ movq 40(%rsp), %rdi
+ movq 32(%rsp), %rax
+ movq 24(%rsp), %rcx
+ movq 8(%rsp), %rdx
+ movq 16(%rsp), %rsi
+ movq %rdx, (%rbx)
+ movq %rsi, 8(%rbx)
+ movq %rcx, 16(%rbx)
+ movq %rax, 24(%rbx)
+ movq %rdi, 32(%rbx)
+ movq %r11, 40(%rbx)
+ movq %r10, 48(%rbx)
+ movq %r9, 56(%rbx)
+ movq %r8, 64(%rbx)
+ addq $80, %rsp
+ popq %rbx
+ retq
+
+ .globl _mcl_fpDbl_mulPre8L
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre8L: ## @mcl_fpDbl_mulPre8L
+## BB#0:
+ pushq %rbp
+ movq %rsp, %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $200, %rsp
+ movq %rdx, %r15
+ movq %rsi, %rbx
+ movq %rdi, %r14
+ callq _mcl_fpDbl_mulPre4L
+ leaq 64(%r14), %rdi
+ leaq 32(%rbx), %rsi
+ leaq 32(%r15), %rdx
+ callq _mcl_fpDbl_mulPre4L
+ movq 56(%rbx), %r10
+ movq 48(%rbx), %rdx
+ movq (%rbx), %rsi
+ movq 8(%rbx), %rdi
+ addq 32(%rbx), %rsi
+ adcq 40(%rbx), %rdi
+ adcq 16(%rbx), %rdx
+ adcq 24(%rbx), %r10
+ pushfq
+ popq %r8
+ xorl %r9d, %r9d
+ movq 56(%r15), %rcx
+ movq 48(%r15), %r13
+ movq (%r15), %r12
+ movq 8(%r15), %rbx
+ addq 32(%r15), %r12
+ adcq 40(%r15), %rbx
+ adcq 16(%r15), %r13
+ adcq 24(%r15), %rcx
+ movl $0, %eax
+ cmovbq %r10, %rax
+ movq %rax, -88(%rbp) ## 8-byte Spill
+ movl $0, %eax
+ cmovbq %rdx, %rax
+ movq %rax, -80(%rbp) ## 8-byte Spill
+ movl $0, %eax
+ cmovbq %rdi, %rax
+ movq %rax, -72(%rbp) ## 8-byte Spill
+ movl $0, %eax
+ cmovbq %rsi, %rax
+ movq %rax, -64(%rbp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq %rsi, -168(%rbp)
+ movq %rdi, -160(%rbp)
+ movq %rdx, -152(%rbp)
+ movq %r10, -144(%rbp)
+ movq %r12, -136(%rbp)
+ movq %rbx, -128(%rbp)
+ movq %r13, -120(%rbp)
+ movq %rcx, -112(%rbp)
+ pushq %r8
+ popfq
+ cmovaeq %r9, %rcx
+ movq %rcx, -48(%rbp) ## 8-byte Spill
+ cmovaeq %r9, %r13
+ cmovaeq %r9, %rbx
+ cmovaeq %r9, %r12
+ sbbq %rax, %rax
+ movq %rax, -56(%rbp) ## 8-byte Spill
+ leaq -232(%rbp), %rdi
+ leaq -168(%rbp), %rsi
+ leaq -136(%rbp), %rdx
+ callq _mcl_fpDbl_mulPre4L
+ addq -64(%rbp), %r12 ## 8-byte Folded Reload
+ adcq -72(%rbp), %rbx ## 8-byte Folded Reload
+ adcq -80(%rbp), %r13 ## 8-byte Folded Reload
+ movq -48(%rbp), %r10 ## 8-byte Reload
+ adcq -88(%rbp), %r10 ## 8-byte Folded Reload
+ sbbq %rax, %rax
+ andl $1, %eax
+ movq -56(%rbp), %rdx ## 8-byte Reload
+ andl %edx, %r15d
+ andl $1, %r15d
+ addq -200(%rbp), %r12
+ adcq -192(%rbp), %rbx
+ adcq -184(%rbp), %r13
+ adcq -176(%rbp), %r10
+ adcq %rax, %r15
+ movq -208(%rbp), %rax
+ movq -216(%rbp), %rcx
+ movq -232(%rbp), %rsi
+ movq -224(%rbp), %rdx
+ subq (%r14), %rsi
+ sbbq 8(%r14), %rdx
+ sbbq 16(%r14), %rcx
+ sbbq 24(%r14), %rax
+ movq 32(%r14), %rdi
+ movq %rdi, -80(%rbp) ## 8-byte Spill
+ movq 40(%r14), %r8
+ movq %r8, -88(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r12
+ sbbq %r8, %rbx
+ movq 48(%r14), %rdi
+ movq %rdi, -72(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r13
+ movq 56(%r14), %rdi
+ movq %rdi, -64(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r10
+ sbbq $0, %r15
+ movq 64(%r14), %r11
+ subq %r11, %rsi
+ movq 72(%r14), %rdi
+ movq %rdi, -56(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rdx
+ movq 80(%r14), %rdi
+ movq %rdi, -48(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rcx
+ movq 88(%r14), %rdi
+ movq %rdi, -104(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rax
+ movq 96(%r14), %rdi
+ movq %rdi, -96(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r12
+ movq 104(%r14), %rdi
+ sbbq %rdi, %rbx
+ movq 112(%r14), %r8
+ sbbq %r8, %r13
+ movq 120(%r14), %r9
+ sbbq %r9, %r10
+ sbbq $0, %r15
+ addq -80(%rbp), %rsi ## 8-byte Folded Reload
+ adcq -88(%rbp), %rdx ## 8-byte Folded Reload
+ movq %rsi, 32(%r14)
+ adcq -72(%rbp), %rcx ## 8-byte Folded Reload
+ movq %rdx, 40(%r14)
+ adcq -64(%rbp), %rax ## 8-byte Folded Reload
+ movq %rcx, 48(%r14)
+ adcq %r11, %r12
+ movq %rax, 56(%r14)
+ movq %r12, 64(%r14)
+ adcq -56(%rbp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 72(%r14)
+ adcq -48(%rbp), %r13 ## 8-byte Folded Reload
+ movq %r13, 80(%r14)
+ adcq -104(%rbp), %r10 ## 8-byte Folded Reload
+ movq %r10, 88(%r14)
+ adcq -96(%rbp), %r15 ## 8-byte Folded Reload
+ movq %r15, 96(%r14)
+ adcq $0, %rdi
+ movq %rdi, 104(%r14)
+ adcq $0, %r8
+ movq %r8, 112(%r14)
+ adcq $0, %r9
+ movq %r9, 120(%r14)
+ addq $200, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sqrPre8L
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre8L: ## @mcl_fpDbl_sqrPre8L
+## BB#0:
+ pushq %rbp
+ movq %rsp, %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $200, %rsp
+ movq %rsi, %rbx
+ movq %rdi, %r14
+ movq %rbx, %rdx
+ callq _mcl_fpDbl_mulPre4L
+ leaq 64(%r14), %rdi
+ leaq 32(%rbx), %rsi
+ movq %rsi, %rdx
+ callq _mcl_fpDbl_mulPre4L
+ movq 56(%rbx), %r15
+ movq 48(%rbx), %rax
+ movq (%rbx), %rcx
+ movq 8(%rbx), %rdx
+ addq 32(%rbx), %rcx
+ adcq 40(%rbx), %rdx
+ adcq 16(%rbx), %rax
+ adcq 24(%rbx), %r15
+ pushfq
+ popq %r8
+ pushfq
+ popq %r9
+ pushfq
+ popq %r10
+ pushfq
+ popq %rdi
+ pushfq
+ popq %rbx
+ sbbq %rsi, %rsi
+ movq %rsi, -56(%rbp) ## 8-byte Spill
+ leaq (%rcx,%rcx), %rsi
+ xorl %r11d, %r11d
+ pushq %rbx
+ popfq
+ cmovaeq %r11, %rsi
+ movq %rsi, -48(%rbp) ## 8-byte Spill
+ movq %rdx, %r13
+ shldq $1, %rcx, %r13
+ pushq %rdi
+ popfq
+ cmovaeq %r11, %r13
+ movq %rax, %r12
+ shldq $1, %rdx, %r12
+ pushq %r10
+ popfq
+ cmovaeq %r11, %r12
+ movq %r15, %rbx
+ movq %rcx, -168(%rbp)
+ movq %rdx, -160(%rbp)
+ movq %rax, -152(%rbp)
+ movq %r15, -144(%rbp)
+ movq %rcx, -136(%rbp)
+ movq %rdx, -128(%rbp)
+ movq %rax, -120(%rbp)
+ movq %r15, -112(%rbp)
+ shldq $1, %rax, %r15
+ pushq %r9
+ popfq
+ cmovaeq %r11, %r15
+ shrq $63, %rbx
+ pushq %r8
+ popfq
+ cmovaeq %r11, %rbx
+ leaq -232(%rbp), %rdi
+ leaq -168(%rbp), %rsi
+ leaq -136(%rbp), %rdx
+ callq _mcl_fpDbl_mulPre4L
+ movq -56(%rbp), %rax ## 8-byte Reload
+ andl $1, %eax
+ movq -48(%rbp), %r10 ## 8-byte Reload
+ addq -200(%rbp), %r10
+ adcq -192(%rbp), %r13
+ adcq -184(%rbp), %r12
+ adcq -176(%rbp), %r15
+ adcq %rbx, %rax
+ movq %rax, %rbx
+ movq -208(%rbp), %rax
+ movq -216(%rbp), %rcx
+ movq -232(%rbp), %rsi
+ movq -224(%rbp), %rdx
+ subq (%r14), %rsi
+ sbbq 8(%r14), %rdx
+ sbbq 16(%r14), %rcx
+ sbbq 24(%r14), %rax
+ movq 32(%r14), %r9
+ movq %r9, -56(%rbp) ## 8-byte Spill
+ movq 40(%r14), %r8
+ movq %r8, -48(%rbp) ## 8-byte Spill
+ sbbq %r9, %r10
+ sbbq %r8, %r13
+ movq 48(%r14), %rdi
+ movq %rdi, -104(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r12
+ movq 56(%r14), %rdi
+ movq %rdi, -96(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r15
+ sbbq $0, %rbx
+ movq 64(%r14), %r11
+ subq %r11, %rsi
+ movq 72(%r14), %rdi
+ movq %rdi, -88(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rdx
+ movq 80(%r14), %rdi
+ movq %rdi, -80(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rcx
+ movq 88(%r14), %rdi
+ movq %rdi, -72(%rbp) ## 8-byte Spill
+ sbbq %rdi, %rax
+ movq 96(%r14), %rdi
+ movq %rdi, -64(%rbp) ## 8-byte Spill
+ sbbq %rdi, %r10
+ movq 104(%r14), %rdi
+ sbbq %rdi, %r13
+ movq 112(%r14), %r8
+ sbbq %r8, %r12
+ movq 120(%r14), %r9
+ sbbq %r9, %r15
+ sbbq $0, %rbx
+ addq -56(%rbp), %rsi ## 8-byte Folded Reload
+ adcq -48(%rbp), %rdx ## 8-byte Folded Reload
+ movq %rsi, 32(%r14)
+ adcq -104(%rbp), %rcx ## 8-byte Folded Reload
+ movq %rdx, 40(%r14)
+ adcq -96(%rbp), %rax ## 8-byte Folded Reload
+ movq %rcx, 48(%r14)
+ adcq %r11, %r10
+ movq %rax, 56(%r14)
+ movq %r10, 64(%r14)
+ adcq -88(%rbp), %r13 ## 8-byte Folded Reload
+ movq %r13, 72(%r14)
+ adcq -80(%rbp), %r12 ## 8-byte Folded Reload
+ movq %r12, 80(%r14)
+ adcq -72(%rbp), %r15 ## 8-byte Folded Reload
+ movq %r15, 88(%r14)
+ movq %rbx, %rax
+ adcq -64(%rbp), %rax ## 8-byte Folded Reload
+ movq %rax, 96(%r14)
+ adcq $0, %rdi
+ movq %rdi, 104(%r14)
+ adcq $0, %r8
+ movq %r8, 112(%r14)
+ adcq $0, %r9
+ movq %r9, 120(%r14)
+ addq $200, %rsp
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mont8L
+ .p2align 4, 0x90
+_mcl_fp_mont8L: ## @mcl_fp_mont8L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1256, %rsp ## imm = 0x4E8
+ movq %rcx, %r13
+ movq %rdx, 64(%rsp) ## 8-byte Spill
+ movq %rsi, 72(%rsp) ## 8-byte Spill
+ movq %rdi, 96(%rsp) ## 8-byte Spill
+ movq -8(%r13), %rbx
+ movq %rbx, 80(%rsp) ## 8-byte Spill
+ movq %r13, 56(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1184(%rsp), %rdi
+ callq l_mulPv512x64
+ movq 1184(%rsp), %r15
+ movq 1192(%rsp), %r14
+ movq %r15, %rdx
+ imulq %rbx, %rdx
+ movq 1248(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 1240(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 1232(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 1224(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 1216(%rsp), %r12
+ movq 1208(%rsp), %rbx
+ movq 1200(%rsp), %rbp
+ leaq 1112(%rsp), %rdi
+ movq %r13, %rsi
+ callq l_mulPv512x64
+ addq 1112(%rsp), %r15
+ adcq 1120(%rsp), %r14
+ adcq 1128(%rsp), %rbp
+ movq %rbp, 88(%rsp) ## 8-byte Spill
+ adcq 1136(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 1144(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 1152(%rsp), %r13
+ movq (%rsp), %rbx ## 8-byte Reload
+ adcq 1160(%rsp), %rbx
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 1168(%rsp), %rbp
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 1176(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1040(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %r15d
+ addq 1040(%rsp), %r14
+ movq 88(%rsp), %rax ## 8-byte Reload
+ adcq 1048(%rsp), %rax
+ movq %rax, 88(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 1056(%rsp), %rax
+ movq %rax, %r12
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 1064(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ adcq 1072(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ adcq 1080(%rsp), %rbx
+ movq %rbx, (%rsp) ## 8-byte Spill
+ adcq 1088(%rsp), %rbp
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 1096(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 1104(%rsp), %r15
+ movq %r15, 48(%rsp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq %r14, %rdx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 968(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %r15d
+ addq 968(%rsp), %r14
+ movq 88(%rsp), %r13 ## 8-byte Reload
+ adcq 976(%rsp), %r13
+ adcq 984(%rsp), %r12
+ movq %r12, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 992(%rsp), %r14
+ movq 16(%rsp), %rbx ## 8-byte Reload
+ adcq 1000(%rsp), %rbx
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 1008(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 1016(%rsp), %rbp
+ movq %rbp, %r12
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 1024(%rsp), %rbp
+ movq 48(%rsp), %rax ## 8-byte Reload
+ adcq 1032(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ adcq $0, %r15
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 896(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %r13, %rcx
+ addq 896(%rsp), %rcx
+ movq 32(%rsp), %r13 ## 8-byte Reload
+ adcq 904(%rsp), %r13
+ adcq 912(%rsp), %r14
+ adcq 920(%rsp), %rbx
+ movq %rbx, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 928(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 936(%rsp), %r12
+ movq %r12, 40(%rsp) ## 8-byte Spill
+ adcq 944(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 952(%rsp), %r12
+ adcq 960(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq %rcx, %rdx
+ movq %rcx, %rbp
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 824(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %ebx
+ addq 824(%rsp), %rbp
+ adcq 832(%rsp), %r13
+ movq %r13, 32(%rsp) ## 8-byte Spill
+ adcq 840(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 848(%rsp), %r13
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 856(%rsp), %rbp
+ movq 40(%rsp), %r14 ## 8-byte Reload
+ adcq 864(%rsp), %r14
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 872(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 880(%rsp), %r12
+ adcq 888(%rsp), %r15
+ adcq $0, %rbx
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 752(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 32(%rsp), %rax ## 8-byte Reload
+ addq 752(%rsp), %rax
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 760(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ adcq 776(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 784(%rsp), %r14
+ movq %r14, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 792(%rsp), %rbp
+ adcq 800(%rsp), %r12
+ adcq 808(%rsp), %r15
+ adcq 816(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 680(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %r13, %rax
+ andl $1, %eax
+ addq 680(%rsp), %rbx
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 688(%rsp), %r14
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 696(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 704(%rsp), %r13
+ movq 40(%rsp), %rbx ## 8-byte Reload
+ adcq 712(%rsp), %rbx
+ adcq 720(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq %r12, %rbp
+ adcq 728(%rsp), %rbp
+ adcq 736(%rsp), %r15
+ movq 32(%rsp), %r12 ## 8-byte Reload
+ adcq 744(%rsp), %r12
+ adcq $0, %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 608(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %r14, %rax
+ addq 608(%rsp), %rax
+ movq 16(%rsp), %r14 ## 8-byte Reload
+ adcq 616(%rsp), %r14
+ adcq 624(%rsp), %r13
+ movq %r13, (%rsp) ## 8-byte Spill
+ adcq 632(%rsp), %rbx
+ movq %rbx, %r13
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 640(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 648(%rsp), %rbp
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 656(%rsp), %r15
+ adcq 664(%rsp), %r12
+ movq %r12, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 672(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ sbbq %rbp, %rbp
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 536(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %rbp, %rax
+ andl $1, %eax
+ addq 536(%rsp), %rbx
+ adcq 544(%rsp), %r14
+ movq %r14, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %rbx ## 8-byte Reload
+ adcq 552(%rsp), %rbx
+ adcq 560(%rsp), %r13
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 568(%rsp), %rbp
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 576(%rsp), %r12
+ adcq 584(%rsp), %r15
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 592(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 600(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 464(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 16(%rsp), %rax ## 8-byte Reload
+ addq 464(%rsp), %rax
+ adcq 472(%rsp), %rbx
+ adcq 480(%rsp), %r13
+ movq %r13, 40(%rsp) ## 8-byte Spill
+ adcq 488(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ adcq 496(%rsp), %r12
+ adcq 504(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r15 ## 8-byte Reload
+ adcq 512(%rsp), %r15
+ adcq 520(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 528(%rsp), %r14
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %rbp
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 392(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq %r13, %rax
+ andl $1, %eax
+ addq 392(%rsp), %rbp
+ adcq 400(%rsp), %rbx
+ movq %rbx, (%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 408(%rsp), %rbp
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 416(%rsp), %rbx
+ adcq 424(%rsp), %r12
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 432(%rsp), %r13
+ adcq 440(%rsp), %r15
+ movq %r15, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r15 ## 8-byte Reload
+ adcq 448(%rsp), %r15
+ adcq 456(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 320(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq (%rsp), %rax ## 8-byte Reload
+ addq 320(%rsp), %rax
+ adcq 328(%rsp), %rbp
+ movq %rbp, 40(%rsp) ## 8-byte Spill
+ adcq 336(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ movq %r12, %rbp
+ adcq 344(%rsp), %rbp
+ adcq 352(%rsp), %r13
+ movq 32(%rsp), %r12 ## 8-byte Reload
+ adcq 360(%rsp), %r12
+ adcq 368(%rsp), %r15
+ movq %r15, 8(%rsp) ## 8-byte Spill
+ adcq 376(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 384(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 248(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %r15d
+ addq 248(%rsp), %rbx
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 256(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r14 ## 8-byte Reload
+ adcq 264(%rsp), %r14
+ adcq 272(%rsp), %rbp
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ movq %r13, %rbx
+ adcq 280(%rsp), %rbx
+ movq %r12, %rbp
+ adcq 288(%rsp), %rbp
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq 296(%rsp), %r13
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 312(%rsp), %r12
+ adcq $0, %r15
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 176(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 40(%rsp), %rax ## 8-byte Reload
+ addq 176(%rsp), %rax
+ adcq 184(%rsp), %r14
+ movq %r14, 24(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 192(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ adcq 200(%rsp), %rbx
+ movq %rbx, 16(%rsp) ## 8-byte Spill
+ adcq 208(%rsp), %rbp
+ adcq 216(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 224(%rsp), %r14
+ adcq 232(%rsp), %r12
+ adcq 240(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq 80(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %r13
+ leaq 104(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %ebx
+ addq 104(%rsp), %r13
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 112(%rsp), %rcx
+ movq 48(%rsp), %rdx ## 8-byte Reload
+ adcq 120(%rsp), %rdx
+ movq 16(%rsp), %rsi ## 8-byte Reload
+ adcq 128(%rsp), %rsi
+ movq %rbp, %rdi
+ adcq 136(%rsp), %rdi
+ movq %rdi, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r8 ## 8-byte Reload
+ adcq 144(%rsp), %r8
+ movq %r8, 8(%rsp) ## 8-byte Spill
+ movq %r14, %r9
+ adcq 152(%rsp), %r9
+ movq %r9, (%rsp) ## 8-byte Spill
+ adcq 160(%rsp), %r12
+ adcq 168(%rsp), %r15
+ adcq $0, %rbx
+ movq %rcx, %rax
+ movq %rcx, %r11
+ movq 56(%rsp), %rbp ## 8-byte Reload
+ subq (%rbp), %rax
+ movq %rdx, %rcx
+ movq %rdx, %r14
+ sbbq 8(%rbp), %rcx
+ movq %rsi, %rdx
+ movq %rsi, %r13
+ sbbq 16(%rbp), %rdx
+ movq %rdi, %rsi
+ sbbq 24(%rbp), %rsi
+ movq %r8, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %r9, %r10
+ sbbq 40(%rbp), %r10
+ movq %r12, %r8
+ sbbq 48(%rbp), %r8
+ movq %r15, %r9
+ sbbq 56(%rbp), %r9
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %r15, %r9
+ testb %bl, %bl
+ cmovneq %r11, %rax
+ movq 96(%rsp), %rbx ## 8-byte Reload
+ movq %rax, (%rbx)
+ cmovneq %r14, %rcx
+ movq %rcx, 8(%rbx)
+ cmovneq %r13, %rdx
+ movq %rdx, 16(%rbx)
+ cmovneq 32(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovneq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 32(%rbx)
+ cmovneq (%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 40(%rbx)
+ cmovneq %r12, %r8
+ movq %r8, 48(%rbx)
+ movq %r9, 56(%rbx)
+ addq $1256, %rsp ## imm = 0x4E8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF8L
+ .p2align 4, 0x90
+_mcl_fp_montNF8L: ## @mcl_fp_montNF8L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1240, %rsp ## imm = 0x4D8
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq %rdx, 48(%rsp) ## 8-byte Spill
+ movq %rsi, 56(%rsp) ## 8-byte Spill
+ movq %rdi, 80(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1168(%rsp), %rdi
+ callq l_mulPv512x64
+ movq 1168(%rsp), %r15
+ movq 1176(%rsp), %r12
+ movq %r15, %rdx
+ imulq %rbx, %rdx
+ movq 1232(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 1224(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 1216(%rsp), %r13
+ movq 1208(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 1200(%rsp), %r14
+ movq 1192(%rsp), %rbp
+ movq 1184(%rsp), %rbx
+ leaq 1096(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 1096(%rsp), %r15
+ adcq 1104(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq 1112(%rsp), %rbx
+ adcq 1120(%rsp), %rbp
+ adcq 1128(%rsp), %r14
+ movq %r14, %r12
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 1136(%rsp), %r14
+ adcq 1144(%rsp), %r13
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 1152(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 1160(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1024(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 1088(%rsp), %r15
+ movq 16(%rsp), %rax ## 8-byte Reload
+ addq 1024(%rsp), %rax
+ adcq 1032(%rsp), %rbx
+ movq %rbx, 72(%rsp) ## 8-byte Spill
+ movq %rbp, %rbx
+ adcq 1040(%rsp), %rbx
+ adcq 1048(%rsp), %r12
+ adcq 1056(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ movq %r13, %rbp
+ adcq 1064(%rsp), %rbp
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 1072(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r14 ## 8-byte Reload
+ adcq 1080(%rsp), %r14
+ adcq $0, %r15
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 952(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 952(%rsp), %r13
+ movq 72(%rsp), %rax ## 8-byte Reload
+ adcq 960(%rsp), %rax
+ movq %rax, 72(%rsp) ## 8-byte Spill
+ adcq 968(%rsp), %rbx
+ movq %rbx, 16(%rsp) ## 8-byte Spill
+ movq %r12, %rbx
+ adcq 976(%rsp), %rbx
+ movq 8(%rsp), %r12 ## 8-byte Reload
+ adcq 984(%rsp), %r12
+ adcq 992(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 1000(%rsp), %r13
+ movq %r14, %rbp
+ adcq 1008(%rsp), %rbp
+ adcq 1016(%rsp), %r15
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 880(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 944(%rsp), %r14
+ movq 72(%rsp), %rax ## 8-byte Reload
+ addq 880(%rsp), %rax
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 888(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq 896(%rsp), %rbx
+ adcq 904(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 912(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 920(%rsp), %r13
+ movq %r13, (%rsp) ## 8-byte Spill
+ adcq 928(%rsp), %rbp
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq 936(%rsp), %r15
+ adcq $0, %r14
+ movq %rax, %rdx
+ movq %rax, %rbp
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 808(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 808(%rsp), %rbp
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 816(%rsp), %r13
+ movq %rbx, %r12
+ adcq 824(%rsp), %r12
+ movq 8(%rsp), %rbx ## 8-byte Reload
+ adcq 832(%rsp), %rbx
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 840(%rsp), %rbp
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 848(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 856(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ adcq 864(%rsp), %r15
+ adcq 872(%rsp), %r14
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 736(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 800(%rsp), %rax
+ movq %r13, %rcx
+ addq 736(%rsp), %rcx
+ adcq 744(%rsp), %r12
+ movq %r12, 24(%rsp) ## 8-byte Spill
+ adcq 752(%rsp), %rbx
+ movq %rbx, 8(%rsp) ## 8-byte Spill
+ adcq 760(%rsp), %rbp
+ movq %rbp, %r13
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 768(%rsp), %rbp
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 776(%rsp), %rbx
+ adcq 784(%rsp), %r15
+ adcq 792(%rsp), %r14
+ adcq $0, %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq %rcx, %rdx
+ movq %rcx, %r12
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 664(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 664(%rsp), %r12
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 672(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 680(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ adcq 688(%rsp), %r13
+ adcq 696(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 704(%rsp), %rbx
+ adcq 712(%rsp), %r15
+ adcq 720(%rsp), %r14
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 728(%rsp), %r12
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 592(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 656(%rsp), %rcx
+ movq 24(%rsp), %rax ## 8-byte Reload
+ addq 592(%rsp), %rax
+ movq 8(%rsp), %rbp ## 8-byte Reload
+ adcq 600(%rsp), %rbp
+ adcq 608(%rsp), %r13
+ movq %r13, 24(%rsp) ## 8-byte Spill
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 616(%rsp), %r13
+ adcq 624(%rsp), %rbx
+ adcq 632(%rsp), %r15
+ adcq 640(%rsp), %r14
+ adcq 648(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 520(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 520(%rsp), %r12
+ adcq 528(%rsp), %rbp
+ movq %rbp, 8(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r12 ## 8-byte Reload
+ adcq 536(%rsp), %r12
+ movq %r13, %rbp
+ adcq 544(%rsp), %rbp
+ adcq 552(%rsp), %rbx
+ adcq 560(%rsp), %r15
+ adcq 568(%rsp), %r14
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 576(%rsp), %r13
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 584(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 448(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 512(%rsp), %rcx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ addq 448(%rsp), %rax
+ adcq 456(%rsp), %r12
+ movq %r12, 24(%rsp) ## 8-byte Spill
+ adcq 464(%rsp), %rbp
+ adcq 472(%rsp), %rbx
+ adcq 480(%rsp), %r15
+ adcq 488(%rsp), %r14
+ adcq 496(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 504(%rsp), %r13
+ adcq $0, %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 376(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 376(%rsp), %r12
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 384(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 392(%rsp), %rbp
+ adcq 400(%rsp), %rbx
+ adcq 408(%rsp), %r15
+ adcq 416(%rsp), %r14
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 424(%rsp), %r12
+ adcq 432(%rsp), %r13
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 440(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 304(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 368(%rsp), %rcx
+ movq 24(%rsp), %rax ## 8-byte Reload
+ addq 304(%rsp), %rax
+ adcq 312(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 320(%rsp), %rbx
+ adcq 328(%rsp), %r15
+ adcq 336(%rsp), %r14
+ adcq 344(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq 352(%rsp), %r13
+ movq 8(%rsp), %rbp ## 8-byte Reload
+ adcq 360(%rsp), %rbp
+ adcq $0, %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 64(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 232(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 232(%rsp), %r12
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 240(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 248(%rsp), %rbx
+ adcq 256(%rsp), %r15
+ adcq 264(%rsp), %r14
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 272(%rsp), %r12
+ adcq 280(%rsp), %r13
+ adcq 288(%rsp), %rbp
+ movq %rbp, 8(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rbp ## 8-byte Reload
+ adcq 296(%rsp), %rbp
+ movq 48(%rsp), %rax ## 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 160(%rsp), %rdi
+ movq 56(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ movq 224(%rsp), %rcx
+ movq (%rsp), %rax ## 8-byte Reload
+ addq 160(%rsp), %rax
+ adcq 168(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 176(%rsp), %r15
+ adcq 184(%rsp), %r14
+ adcq 192(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq 200(%rsp), %r13
+ movq 8(%rsp), %rbx ## 8-byte Reload
+ adcq 208(%rsp), %rbx
+ adcq 216(%rsp), %rbp
+ movq %rbp, %r12
+ adcq $0, %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbp
+ leaq 88(%rsp), %rdi
+ movq 40(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 88(%rsp), %rbp
+ movq 32(%rsp), %r11 ## 8-byte Reload
+ adcq 96(%rsp), %r11
+ adcq 104(%rsp), %r15
+ adcq 112(%rsp), %r14
+ movq 16(%rsp), %rsi ## 8-byte Reload
+ adcq 120(%rsp), %rsi
+ movq %rsi, 16(%rsp) ## 8-byte Spill
+ adcq 128(%rsp), %r13
+ adcq 136(%rsp), %rbx
+ movq %rbx, 8(%rsp) ## 8-byte Spill
+ adcq 144(%rsp), %r12
+ movq (%rsp), %r8 ## 8-byte Reload
+ adcq 152(%rsp), %r8
+ movq %r11, %rax
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ subq (%rbp), %rax
+ movq %r15, %rcx
+ sbbq 8(%rbp), %rcx
+ movq %r14, %rdx
+ sbbq 16(%rbp), %rdx
+ sbbq 24(%rbp), %rsi
+ movq %r13, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %rbx, %r9
+ sbbq 40(%rbp), %r9
+ movq %r12, %r10
+ sbbq 48(%rbp), %r10
+ movq %rbp, %rbx
+ movq %r8, %rbp
+ sbbq 56(%rbx), %rbp
+ testq %rbp, %rbp
+ cmovsq %r11, %rax
+ movq 80(%rsp), %rbx ## 8-byte Reload
+ movq %rax, (%rbx)
+ cmovsq %r15, %rcx
+ movq %rcx, 8(%rbx)
+ cmovsq %r14, %rdx
+ movq %rdx, 16(%rbx)
+ cmovsq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovsq %r13, %rdi
+ movq %rdi, 32(%rbx)
+ cmovsq 8(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 40(%rbx)
+ cmovsq %r12, %r10
+ movq %r10, 48(%rbx)
+ cmovsq %r8, %rbp
+ movq %rbp, 56(%rbx)
+ addq $1240, %rsp ## imm = 0x4D8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed8L
+ .p2align 4, 0x90
+_mcl_fp_montRed8L: ## @mcl_fp_montRed8L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $776, %rsp ## imm = 0x308
+ movq %rdx, %rax
+ movq %rdi, 192(%rsp) ## 8-byte Spill
+ movq -8(%rax), %rcx
+ movq %rcx, 104(%rsp) ## 8-byte Spill
+ movq (%rsi), %r15
+ movq 8(%rsi), %rdx
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq %r15, %rdx
+ imulq %rcx, %rdx
+ movq 120(%rsi), %rcx
+ movq %rcx, 112(%rsp) ## 8-byte Spill
+ movq 112(%rsi), %rcx
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ movq 104(%rsi), %rcx
+ movq %rcx, 96(%rsp) ## 8-byte Spill
+ movq 96(%rsi), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq 88(%rsi), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ movq 80(%rsi), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 72(%rsi), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 64(%rsi), %r13
+ movq 56(%rsi), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 48(%rsi), %r14
+ movq 40(%rsi), %rcx
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ movq 32(%rsi), %r12
+ movq 24(%rsi), %rbx
+ movq 16(%rsi), %rbp
+ movq %rax, %rcx
+ movq (%rcx), %rax
+ movq %rax, 136(%rsp) ## 8-byte Spill
+ movq 56(%rcx), %rax
+ movq %rax, 184(%rsp) ## 8-byte Spill
+ movq 48(%rcx), %rax
+ movq %rax, 176(%rsp) ## 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, 168(%rsp) ## 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, 160(%rsp) ## 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, 152(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, 144(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, 128(%rsp) ## 8-byte Spill
+ movq %rcx, %rsi
+ movq %rsi, 88(%rsp) ## 8-byte Spill
+ leaq 704(%rsp), %rdi
+ callq l_mulPv512x64
+ addq 704(%rsp), %r15
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 712(%rsp), %rcx
+ adcq 720(%rsp), %rbp
+ movq %rbp, 80(%rsp) ## 8-byte Spill
+ adcq 728(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 736(%rsp), %r12
+ movq %r12, 120(%rsp) ## 8-byte Spill
+ movq 72(%rsp), %rax ## 8-byte Reload
+ adcq 744(%rsp), %rax
+ movq %rax, 72(%rsp) ## 8-byte Spill
+ adcq 752(%rsp), %r14
+ movq %r14, %r12
+ movq 64(%rsp), %rax ## 8-byte Reload
+ adcq 760(%rsp), %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ adcq $0, 16(%rsp) ## 8-byte Folded Spill
+ movq 40(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 24(%rsp) ## 8-byte Folded Spill
+ adcq $0, 48(%rsp) ## 8-byte Folded Spill
+ adcq $0, 96(%rsp) ## 8-byte Folded Spill
+ movq 56(%rsp), %r13 ## 8-byte Reload
+ adcq $0, %r13
+ movq 112(%rsp), %r14 ## 8-byte Reload
+ adcq $0, %r14
+ sbbq %rbx, %rbx
+ movq %rcx, %rbp
+ movq %rbp, %rdx
+ imulq 104(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 632(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 632(%rsp), %rbp
+ movq 80(%rsp), %rsi ## 8-byte Reload
+ adcq 640(%rsp), %rsi
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 648(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq 120(%rsp), %rcx ## 8-byte Reload
+ adcq 656(%rsp), %rcx
+ movq %rcx, 120(%rsp) ## 8-byte Spill
+ movq 72(%rsp), %rcx ## 8-byte Reload
+ adcq 664(%rsp), %rcx
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ adcq 672(%rsp), %r12
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 680(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 688(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 696(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r15
+ movq %r15, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ movq 48(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 96(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r13
+ movq %r13, 56(%rsp) ## 8-byte Spill
+ adcq $0, %r14
+ movq %r14, 112(%rsp) ## 8-byte Spill
+ movq %rax, %rbp
+ adcq $0, %rbp
+ movq %rsi, %rdx
+ movq %rsi, %r14
+ imulq 104(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 560(%rsp), %rdi
+ movq 88(%rsp), %r13 ## 8-byte Reload
+ movq %r13, %rsi
+ callq l_mulPv512x64
+ addq 560(%rsp), %r14
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 568(%rsp), %rcx
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 576(%rsp), %rax
+ movq %rax, 120(%rsp) ## 8-byte Spill
+ movq 72(%rsp), %rax ## 8-byte Reload
+ adcq 584(%rsp), %rax
+ movq %rax, 72(%rsp) ## 8-byte Spill
+ adcq 592(%rsp), %r12
+ movq %r12, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %r14 ## 8-byte Reload
+ adcq 600(%rsp), %r14
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 608(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 616(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 624(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq $0, %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r15
+ movq %r15, 48(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ movq 56(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 112(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rbp
+ movq %rbp, 80(%rsp) ## 8-byte Spill
+ movq %rcx, %rbp
+ movq %rbp, %rdx
+ movq 104(%rsp), %r12 ## 8-byte Reload
+ imulq %r12, %rdx
+ leaq 488(%rsp), %rdi
+ movq %r13, %rsi
+ callq l_mulPv512x64
+ addq 488(%rsp), %rbp
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 496(%rsp), %rax
+ movq 72(%rsp), %rbp ## 8-byte Reload
+ adcq 504(%rsp), %rbp
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 512(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ adcq 520(%rsp), %r14
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 528(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 536(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %r13 ## 8-byte Reload
+ adcq 544(%rsp), %r13
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 552(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, 48(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 96(%rsp) ## 8-byte Spill
+ movq %r15, %rbx
+ adcq $0, %rbx
+ adcq $0, 112(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq %r12, %rdx
+ leaq 416(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 416(%rsp), %r15
+ adcq 424(%rsp), %rbp
+ movq %rbp, %rax
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq %r14, %r12
+ adcq 440(%rsp), %r12
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 448(%rsp), %r14
+ movq 16(%rsp), %rbp ## 8-byte Reload
+ adcq 456(%rsp), %rbp
+ adcq 464(%rsp), %r13
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 472(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ adcq $0, 96(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ movq 112(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 104(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 344(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 344(%rsp), %rbx
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 352(%rsp), %rax
+ adcq 360(%rsp), %r12
+ movq %r12, 64(%rsp) ## 8-byte Spill
+ adcq 368(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ adcq 376(%rsp), %rbp
+ movq %rbp, 16(%rsp) ## 8-byte Spill
+ adcq 384(%rsp), %r13
+ movq %r13, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r13 ## 8-byte Reload
+ adcq 392(%rsp), %r13
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 400(%rsp), %r12
+ movq 96(%rsp), %r14 ## 8-byte Reload
+ adcq 408(%rsp), %r14
+ movq 56(%rsp), %rbp ## 8-byte Reload
+ adcq $0, %rbp
+ movq %r15, %rbx
+ adcq $0, %rbx
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq 104(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 272(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 272(%rsp), %r15
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 280(%rsp), %rcx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 288(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 296(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 312(%rsp), %r13
+ movq %r13, 24(%rsp) ## 8-byte Spill
+ adcq 320(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ adcq 328(%rsp), %r14
+ movq %r14, %r13
+ adcq 336(%rsp), %rbp
+ movq %rbp, %r12
+ adcq $0, %rbx
+ movq %rbx, %r14
+ movq 80(%rsp), %r15 ## 8-byte Reload
+ adcq $0, %r15
+ movq 104(%rsp), %rdx ## 8-byte Reload
+ movq %rcx, %rbx
+ imulq %rbx, %rdx
+ leaq 200(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv512x64
+ addq 200(%rsp), %rbx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 208(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r8 ## 8-byte Reload
+ adcq 216(%rsp), %r8
+ movq %r8, 16(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rdx ## 8-byte Reload
+ adcq 224(%rsp), %rdx
+ movq 24(%rsp), %rsi ## 8-byte Reload
+ adcq 232(%rsp), %rsi
+ movq 48(%rsp), %rdi ## 8-byte Reload
+ adcq 240(%rsp), %rdi
+ movq %r13, %rbp
+ adcq 248(%rsp), %rbp
+ movq %r12, %rbx
+ adcq 256(%rsp), %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ movq %r14, %r9
+ adcq 264(%rsp), %r9
+ adcq $0, %r15
+ movq %r15, %r10
+ subq 136(%rsp), %rax ## 8-byte Folded Reload
+ movq %r8, %rcx
+ sbbq 128(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rdx, %r13
+ sbbq 144(%rsp), %r13 ## 8-byte Folded Reload
+ movq %rsi, %r12
+ sbbq 152(%rsp), %r12 ## 8-byte Folded Reload
+ movq %rdi, %r14
+ sbbq 160(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rbp, %r11
+ sbbq 168(%rsp), %r11 ## 8-byte Folded Reload
+ movq %rbx, %r8
+ sbbq 176(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r9, %r15
+ sbbq 184(%rsp), %r9 ## 8-byte Folded Reload
+ sbbq $0, %r10
+ andl $1, %r10d
+ cmovneq %r15, %r9
+ testb %r10b, %r10b
+ cmovneq 8(%rsp), %rax ## 8-byte Folded Reload
+ movq 192(%rsp), %rbx ## 8-byte Reload
+ movq %rax, (%rbx)
+ cmovneq 16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 8(%rbx)
+ cmovneq %rdx, %r13
+ movq %r13, 16(%rbx)
+ cmovneq %rsi, %r12
+ movq %r12, 24(%rbx)
+ cmovneq %rdi, %r14
+ movq %r14, 32(%rbx)
+ cmovneq %rbp, %r11
+ movq %r11, 40(%rbx)
+ cmovneq 56(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 48(%rbx)
+ movq %r9, 56(%rbx)
+ addq $776, %rsp ## imm = 0x308
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre8L
+ .p2align 4, 0x90
+_mcl_fp_addPre8L: ## @mcl_fp_addPre8L
+## BB#0:
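+## 8-limb (512-bit) add: *%rdi = *%rsi + *%rdx, limb by limb; the carry-out (0 or 1) is returned in %rax.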
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 56(%rsi), %r15
+ movq 48(%rdx), %r9
+ movq 48(%rsi), %r12
+ movq 40(%rdx), %r10
+ movq 32(%rdx), %r11
+ movq 24(%rdx), %r14
+ movq 16(%rdx), %rbx
+ movq (%rdx), %rcx
+ movq 8(%rdx), %rdx
+ addq (%rsi), %rcx
+ adcq 8(%rsi), %rdx
+ adcq 16(%rsi), %rbx
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %rax
+ movq 32(%rsi), %rsi
+ movq %rcx, (%rdi)
+ movq %rdx, 8(%rdi)
+ movq %rbx, 16(%rdi)
+ adcq %r14, %rax
+ movq %rax, 24(%rdi)
+ adcq %r11, %rsi
+ movq %rsi, 32(%rdi)
+ adcq %r10, %r13
+ movq %r13, 40(%rdi)
+ adcq %r9, %r12
+ movq %r12, 48(%rdi)
+ adcq %r8, %r15
+ movq %r15, 56(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_subPre8L
+ .p2align 4, 0x90
+_mcl_fp_subPre8L: ## @mcl_fp_subPre8L
+## BB#0:
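+## 8-limb subtract: *%rdi = *%rsi - *%rdx; the borrow-out (0 or 1) is returned in %rax.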
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 56(%rsi), %r15
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %r10
+ movq 24(%rdx), %r11
+ movq 32(%rdx), %r14
+ movq (%rsi), %rbx
+ movq 8(%rsi), %r12
+ xorl %eax, %eax
+ subq (%rdx), %rbx
+ sbbq 8(%rdx), %r12
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq 48(%rsi), %r13
+ movq 40(%rsi), %rdx
+ movq 32(%rsi), %rbp
+ movq 24(%rsi), %rsi
+ movq %rbx, (%rdi)
+ movq %r12, 8(%rdi)
+ movq %rcx, 16(%rdi)
+ sbbq %r11, %rsi
+ movq %rsi, 24(%rdi)
+ sbbq %r14, %rbp
+ movq %rbp, 32(%rdi)
+ sbbq %r10, %rdx
+ movq %rdx, 40(%rdi)
+ sbbq %r9, %r13
+ movq %r13, 48(%rdi)
+ sbbq %r8, %r15
+ movq %r15, 56(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_shr1_8L
+ .p2align 4, 0x90
+_mcl_fp_shr1_8L: ## @mcl_fp_shr1_8L
+## BB#0:
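+## 8-limb logical right shift by one: each limb is shifted with shrd to pull in the low bit of the next limb.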
+ movq 56(%rsi), %r8
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r10
+ movq 32(%rsi), %r11
+ movq 24(%rsi), %rcx
+ movq 16(%rsi), %rdx
+ movq (%rsi), %rax
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rax
+ movq %rax, (%rdi)
+ shrdq $1, %rdx, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 16(%rdi)
+ shrdq $1, %r11, %rcx
+ movq %rcx, 24(%rdi)
+ shrdq $1, %r10, %r11
+ movq %r11, 32(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 40(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 48(%rdi)
+ shrq %r8
+ movq %r8, 56(%rdi)
+ retq
+
+ .globl _mcl_fp_add8L
+ .p2align 4, 0x90
+_mcl_fp_add8L: ## @mcl_fp_add8L
+## BB#0:
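+## Modular add: the raw sum x + y is stored, then overwritten with x + y - p unless that would go negative (p in %rcx).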
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r15
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r12
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r13
+ movq 24(%rsi), %r11
+ movq 32(%rsi), %r10
+ movq (%rdx), %r14
+ movq 8(%rdx), %rbx
+ addq (%rsi), %r14
+ adcq 8(%rsi), %rbx
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r11
+ movq 40(%rdx), %rsi
+ adcq 32(%rdx), %r10
+ movq %r14, (%rdi)
+ movq %rbx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r10, 32(%rdi)
+ adcq %r13, %rsi
+ movq %rsi, 40(%rdi)
+ adcq %r12, %r9
+ movq %r9, 48(%rdi)
+ adcq %r15, %r8
+ movq %r8, 56(%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %r14
+ sbbq 8(%rcx), %rbx
+ sbbq 16(%rcx), %rax
+ sbbq 24(%rcx), %r11
+ sbbq 32(%rcx), %r10
+ sbbq 40(%rcx), %rsi
+ sbbq 48(%rcx), %r9
+ sbbq 56(%rcx), %r8
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne LBB120_2
+## BB#1: ## %nocarry
+ movq %r14, (%rdi)
+ movq %rbx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r10, 32(%rdi)
+ movq %rsi, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %r8, 56(%rdi)
+LBB120_2: ## %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_addNF8L
+ .p2align 4, 0x90
+_mcl_fp_addNF8L: ## @mcl_fp_addNF8L
+## BB#0:
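+## Add with sign-based reduction: x + y - p is selected with cmovs, falling back to x + y when the subtraction goes negative.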
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r8
+ movq 48(%rdx), %rbp
+ movq 40(%rdx), %rbx
+ movq 32(%rdx), %rax
+ movq 24(%rdx), %r11
+ movq 16(%rdx), %r15
+ movq (%rdx), %r13
+ movq 8(%rdx), %r12
+ addq (%rsi), %r13
+ adcq 8(%rsi), %r12
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %r11
+ adcq 32(%rsi), %rax
+ movq %rax, %r10
+ movq %r10, -24(%rsp) ## 8-byte Spill
+ adcq 40(%rsi), %rbx
+ movq %rbx, %r9
+ movq %r9, -16(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %rbp
+ movq %rbp, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ adcq 56(%rsi), %r8
+ movq %r13, %rsi
+ subq (%rcx), %rsi
+ movq %r12, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r15, %rbx
+ sbbq 16(%rcx), %rbx
+ movq %r11, %r14
+ sbbq 24(%rcx), %r14
+ movq %r10, %rbp
+ sbbq 32(%rcx), %rbp
+ movq %r9, %r10
+ sbbq 40(%rcx), %r10
+ movq %rax, %r9
+ sbbq 48(%rcx), %r9
+ movq %r8, %rax
+ sbbq 56(%rcx), %rax
+ testq %rax, %rax
+ cmovsq %r13, %rsi
+ movq %rsi, (%rdi)
+ cmovsq %r12, %rdx
+ movq %rdx, 8(%rdi)
+ cmovsq %r15, %rbx
+ movq %rbx, 16(%rdi)
+ cmovsq %r11, %r14
+ movq %r14, 24(%rdi)
+ cmovsq -24(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rdi)
+ cmovsq -16(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 40(%rdi)
+ cmovsq -8(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 48(%rdi)
+ cmovsq %r8, %rax
+ movq %rax, 56(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_sub8L
+ .p2align 4, 0x90
+_mcl_fp_sub8L: ## @mcl_fp_sub8L
+## BB#0:
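+## Modular subtract: x - y is stored; if the subtraction borrowed, p (in %rcx) is added back in the %carry block.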
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 56(%rdx), %r12
+ movq 56(%rsi), %r8
+ movq 48(%rdx), %r13
+ movq (%rsi), %rax
+ movq 8(%rsi), %r10
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r10
+ movq 16(%rsi), %r11
+ sbbq 16(%rdx), %r11
+ movq 24(%rsi), %r15
+ sbbq 24(%rdx), %r15
+ movq 32(%rsi), %r14
+ sbbq 32(%rdx), %r14
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %rsi
+ sbbq 40(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r10, 8(%rdi)
+ movq %r11, 16(%rdi)
+ movq %r15, 24(%rdi)
+ movq %r14, 32(%rdi)
+ movq %rsi, 40(%rdi)
+ sbbq %r13, %r9
+ movq %r9, 48(%rdi)
+ sbbq %r12, %r8
+ movq %r8, 56(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB122_2
+## BB#1: ## %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ movq 8(%rcx), %rax
+ adcq %r10, %rax
+ movq %rax, 8(%rdi)
+ movq 16(%rcx), %rax
+ adcq %r11, %rax
+ movq %rax, 16(%rdi)
+ movq 24(%rcx), %rax
+ adcq %r15, %rax
+ movq %rax, 24(%rdi)
+ movq 32(%rcx), %rax
+ adcq %r14, %rax
+ movq %rax, 32(%rdi)
+ movq 40(%rcx), %rax
+ adcq %rsi, %rax
+ movq %rax, 40(%rdi)
+ movq 48(%rcx), %rax
+ adcq %r9, %rax
+ movq %rax, 48(%rdi)
+ movq 56(%rcx), %rax
+ adcq %r8, %rax
+ movq %rax, 56(%rdi)
+LBB122_2: ## %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_subNF8L
+ .p2align 4, 0x90
+_mcl_fp_subNF8L: ## @mcl_fp_subNF8L
+## BB#0:
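+## Branch-free modular subtract: operands are loaded with SSE, and p masked by the sign of x - y is added to the difference.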
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq %rdi, %r9
+ movdqu (%rdx), %xmm0
+ movdqu 16(%rdx), %xmm1
+ movdqu 32(%rdx), %xmm2
+ movdqu 48(%rdx), %xmm3
+ pshufd $78, %xmm3, %xmm4 ## xmm4 = xmm3[2,3,0,1]
+ movd %xmm4, %r12
+ movdqu (%rsi), %xmm4
+ movdqu 16(%rsi), %xmm5
+ movdqu 32(%rsi), %xmm8
+ movdqu 48(%rsi), %xmm7
+ pshufd $78, %xmm7, %xmm6 ## xmm6 = xmm7[2,3,0,1]
+ movd %xmm6, %rcx
+ movd %xmm3, %r13
+ movd %xmm7, %rdi
+ pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1]
+ movd %xmm3, %rbp
+ pshufd $78, %xmm8, %xmm3 ## xmm3 = xmm8[2,3,0,1]
+ movd %xmm3, %rdx
+ movd %xmm2, %rsi
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %r11
+ pshufd $78, %xmm5, %xmm2 ## xmm2 = xmm5[2,3,0,1]
+ movd %xmm1, %r15
+ pshufd $78, %xmm0, %xmm1 ## xmm1 = xmm0[2,3,0,1]
+ movd %xmm1, %rbx
+ pshufd $78, %xmm4, %xmm1 ## xmm1 = xmm4[2,3,0,1]
+ movd %xmm0, %rax
+ movd %xmm4, %r14
+ subq %rax, %r14
+ movd %xmm1, %r10
+ sbbq %rbx, %r10
+ movd %xmm5, %rbx
+ sbbq %r15, %rbx
+ movd %xmm2, %r15
+ sbbq %r11, %r15
+ movd %xmm8, %r11
+ sbbq %rsi, %r11
+ sbbq %rbp, %rdx
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ sbbq %r13, %rdi
+ movq %rdi, -16(%rsp) ## 8-byte Spill
+ sbbq %r12, %rcx
+ movq %rcx, -8(%rsp) ## 8-byte Spill
+ movq %rcx, %rbp
+ sarq $63, %rbp
+ movq 56(%r8), %r12
+ andq %rbp, %r12
+ movq 48(%r8), %r13
+ andq %rbp, %r13
+ movq 40(%r8), %rdi
+ andq %rbp, %rdi
+ movq 32(%r8), %rsi
+ andq %rbp, %rsi
+ movq 24(%r8), %rdx
+ andq %rbp, %rdx
+ movq 16(%r8), %rcx
+ andq %rbp, %rcx
+ movq 8(%r8), %rax
+ andq %rbp, %rax
+ andq (%r8), %rbp
+ addq %r14, %rbp
+ adcq %r10, %rax
+ movq %rbp, (%r9)
+ adcq %rbx, %rcx
+ movq %rax, 8(%r9)
+ movq %rcx, 16(%r9)
+ adcq %r15, %rdx
+ movq %rdx, 24(%r9)
+ adcq %r11, %rsi
+ movq %rsi, 32(%r9)
+ adcq -24(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 40(%r9)
+ adcq -16(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 48(%r9)
+ adcq -8(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, 56(%r9)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_add8L
+ .p2align 4, 0x90
+_mcl_fpDbl_add8L: ## @mcl_fpDbl_add8L
+## BB#0:
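+## Double-width (16-limb) add: the low 8 limbs are stored as-is, the high 8 limbs get a conditional subtraction of p.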
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r8
+ movq 120(%rdx), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq 112(%rdx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq 104(%rdx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq 96(%rdx), %r14
+ movq 24(%rsi), %r15
+ movq 32(%rsi), %r11
+ movq 16(%rdx), %r12
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rax
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rax
+ adcq 16(%rsi), %r12
+ adcq 24(%rdx), %r15
+ adcq 32(%rdx), %r11
+ movq 88(%rdx), %rbp
+ movq 80(%rdx), %r13
+ movq %rbx, (%rdi)
+ movq 72(%rdx), %r10
+ movq %rax, 8(%rdi)
+ movq 64(%rdx), %r9
+ movq %r12, 16(%rdi)
+ movq 40(%rdx), %r12
+ movq %r15, 24(%rdi)
+ movq 40(%rsi), %rbx
+ adcq %r12, %rbx
+ movq 56(%rdx), %r15
+ movq 48(%rdx), %r12
+ movq %r11, 32(%rdi)
+ movq 48(%rsi), %rdx
+ adcq %r12, %rdx
+ movq 120(%rsi), %r12
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rax
+ adcq %r15, %rax
+ movq 112(%rsi), %rcx
+ movq %rdx, 48(%rdi)
+ movq 64(%rsi), %rbx
+ adcq %r9, %rbx
+ movq 104(%rsi), %rdx
+ movq %rax, 56(%rdi)
+ movq 72(%rsi), %r9
+ adcq %r10, %r9
+ movq 80(%rsi), %r11
+ adcq %r13, %r11
+ movq 96(%rsi), %rax
+ movq 88(%rsi), %r15
+ adcq %rbp, %r15
+ adcq %r14, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rdx, %rax
+ adcq -24(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ adcq -16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -16(%rsp) ## 8-byte Spill
+ adcq -32(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, -32(%rsp) ## 8-byte Spill
+ sbbq %rbp, %rbp
+ andl $1, %ebp
+ movq %rbx, %rsi
+ subq (%r8), %rsi
+ movq %r9, %rdx
+ sbbq 8(%r8), %rdx
+ movq %r11, %r10
+ sbbq 16(%r8), %r10
+ movq %r15, %r14
+ sbbq 24(%r8), %r14
+ movq -8(%rsp), %r13 ## 8-byte Reload
+ sbbq 32(%r8), %r13
+ movq %rax, %r12
+ sbbq 40(%r8), %r12
+ movq %rcx, %rax
+ sbbq 48(%r8), %rax
+ movq -32(%rsp), %rcx ## 8-byte Reload
+ sbbq 56(%r8), %rcx
+ sbbq $0, %rbp
+ andl $1, %ebp
+ cmovneq %rbx, %rsi
+ movq %rsi, 64(%rdi)
+ testb %bpl, %bpl
+ cmovneq %r9, %rdx
+ movq %rdx, 72(%rdi)
+ cmovneq %r11, %r10
+ movq %r10, 80(%rdi)
+ cmovneq %r15, %r14
+ movq %r14, 88(%rdi)
+ cmovneq -8(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 96(%rdi)
+ cmovneq -24(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, 104(%rdi)
+ cmovneq -16(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 112(%rdi)
+ cmovneq -32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 120(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sub8L
+ .p2align 4, 0x90
+_mcl_fpDbl_sub8L: ## @mcl_fpDbl_sub8L
+## BB#0:
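+## Double-width (16-limb) subtract: the low 8 limbs are stored as-is, and p is conditionally added to the high 8 limbs on borrow.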
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r15
+ movq 120(%rdx), %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 112(%rdx), %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq 104(%rdx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r9
+ movq (%rsi), %r12
+ movq 8(%rsi), %r14
+ xorl %r8d, %r8d
+ subq (%rdx), %r12
+ sbbq 8(%rdx), %r14
+ sbbq 16(%rdx), %r9
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %r13
+ sbbq 32(%rdx), %r13
+ movq 96(%rdx), %rbp
+ movq 88(%rdx), %r11
+ movq %r12, (%rdi)
+ movq 80(%rdx), %r12
+ movq %r14, 8(%rdi)
+ movq 72(%rdx), %r10
+ movq %r9, 16(%rdi)
+ movq 40(%rdx), %r9
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %r9, %rbx
+ movq 48(%rdx), %r9
+ movq %r13, 32(%rdi)
+ movq 48(%rsi), %r14
+ sbbq %r9, %r14
+ movq 64(%rdx), %r13
+ movq 56(%rdx), %r9
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rdx
+ sbbq %r9, %rdx
+ movq 120(%rsi), %rcx
+ movq %r14, 48(%rdi)
+ movq 64(%rsi), %rbx
+ sbbq %r13, %rbx
+ movq 112(%rsi), %rax
+ movq %rdx, 56(%rdi)
+ movq 72(%rsi), %r9
+ sbbq %r10, %r9
+ movq 80(%rsi), %r13
+ sbbq %r12, %r13
+ movq 88(%rsi), %r12
+ sbbq %r11, %r12
+ movq 104(%rsi), %rdx
+ movq 96(%rsi), %r14
+ sbbq %rbp, %r14
+ sbbq -24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ sbbq -16(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ sbbq -8(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -8(%rsp) ## 8-byte Spill
+ movl $0, %ebp
+ sbbq $0, %rbp
+ andl $1, %ebp
+ movq (%r15), %r11
+ cmoveq %r8, %r11
+ testb %bpl, %bpl
+ movq 16(%r15), %rbp
+ cmoveq %r8, %rbp
+ movq 8(%r15), %rsi
+ cmoveq %r8, %rsi
+ movq 56(%r15), %r10
+ cmoveq %r8, %r10
+ movq 48(%r15), %rdx
+ cmoveq %r8, %rdx
+ movq 40(%r15), %rcx
+ cmoveq %r8, %rcx
+ movq 32(%r15), %rax
+ cmoveq %r8, %rax
+ cmovneq 24(%r15), %r8
+ addq %rbx, %r11
+ adcq %r9, %rsi
+ movq %r11, 64(%rdi)
+ adcq %r13, %rbp
+ movq %rsi, 72(%rdi)
+ movq %rbp, 80(%rdi)
+ adcq %r12, %r8
+ movq %r8, 88(%rdi)
+ adcq %r14, %rax
+ movq %rax, 96(%rdi)
+ adcq -24(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 104(%rdi)
+ adcq -16(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 112(%rdi)
+ adcq -8(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 120(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .p2align 4, 0x90
+l_mulPv576x64: ## @mulPv576x64
+## BB#0:
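+## Local helper: multiplies the 9-limb (576-bit) operand at %rsi by the scalar in %rdx and writes the 10-limb product to %rdi.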
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdx, %rbx
+ movq %rbx, %rax
+ mulq (%rsi)
+ movq %rdx, -32(%rsp) ## 8-byte Spill
+ movq %rax, (%rdi)
+ movq %rbx, %rax
+ mulq 64(%rsi)
+ movq %rdx, %r10
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 56(%rsi)
+ movq %rdx, %r14
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 48(%rsi)
+ movq %rdx, %r12
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 40(%rsi)
+ movq %rdx, %rcx
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq %rbx, %rax
+ mulq 32(%rsi)
+ movq %rdx, %rbp
+ movq %rax, %r8
+ movq %rbx, %rax
+ mulq 24(%rsi)
+ movq %rdx, %r9
+ movq %rax, %r11
+ movq %rbx, %rax
+ mulq 16(%rsi)
+ movq %rdx, %r15
+ movq %rax, %r13
+ movq %rbx, %rax
+ mulq 8(%rsi)
+ addq -32(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 8(%rdi)
+ adcq %r13, %rdx
+ movq %rdx, 16(%rdi)
+ adcq %r11, %r15
+ movq %r15, 24(%rdi)
+ adcq %r8, %r9
+ movq %r9, 32(%rdi)
+ adcq -40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 40(%rdi)
+ adcq -24(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 48(%rdi)
+ adcq -16(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, 56(%rdi)
+ adcq -8(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, 64(%rdi)
+ adcq $0, %r10
+ movq %r10, 72(%rdi)
+ movq %rdi, %rax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mulUnitPre9L
+ .p2align 4, 0x90
+_mcl_fp_mulUnitPre9L: ## @mcl_fp_mulUnitPre9L
+## BB#0:
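+## 9-limb by 64-bit multiply: l_mulPv576x64 is called into a stack buffer and the 10-limb result is copied out to %rdi.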
+ pushq %r14
+ pushq %rbx
+ subq $88, %rsp
+ movq %rdi, %rbx
+ leaq 8(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 80(%rsp), %r8
+ movq 72(%rsp), %r9
+ movq 64(%rsp), %r10
+ movq 56(%rsp), %r11
+ movq 48(%rsp), %r14
+ movq 40(%rsp), %rax
+ movq 32(%rsp), %rcx
+ movq 24(%rsp), %rdx
+ movq 8(%rsp), %rsi
+ movq 16(%rsp), %rdi
+ movq %rsi, (%rbx)
+ movq %rdi, 8(%rbx)
+ movq %rdx, 16(%rbx)
+ movq %rcx, 24(%rbx)
+ movq %rax, 32(%rbx)
+ movq %r14, 40(%rbx)
+ movq %r11, 48(%rbx)
+ movq %r10, 56(%rbx)
+ movq %r9, 64(%rbx)
+ movq %r8, 72(%rbx)
+ addq $88, %rsp
+ popq %rbx
+ popq %r14
+ retq
+
+ .globl _mcl_fpDbl_mulPre9L
+ .p2align 4, 0x90
+_mcl_fpDbl_mulPre9L: ## @mcl_fpDbl_mulPre9L
+## BB#0:
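+## Schoolbook 9x9-limb multiply: one l_mulPv576x64 call per limb of the multiplier, accumulating into the 18-limb product.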
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $808, %rsp ## imm = 0x328
+ movq %rdx, %rax
+ movq %rdi, %r12
+ movq (%rax), %rdx
+ movq %rax, %rbx
+ movq %rbx, 80(%rsp) ## 8-byte Spill
+ leaq 728(%rsp), %rdi
+ movq %rsi, %rbp
+ movq %rbp, 72(%rsp) ## 8-byte Spill
+ callq l_mulPv576x64
+ movq 800(%rsp), %r13
+ movq 792(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 784(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 776(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 768(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 760(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 752(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 744(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 728(%rsp), %rax
+ movq 736(%rsp), %r14
+ movq %rax, (%r12)
+ movq %r12, 64(%rsp) ## 8-byte Spill
+ movq 8(%rbx), %rdx
+ leaq 648(%rsp), %rdi
+ movq %rbp, %rsi
+ callq l_mulPv576x64
+ movq 720(%rsp), %r8
+ movq 712(%rsp), %rcx
+ movq 704(%rsp), %rdx
+ movq 696(%rsp), %rsi
+ movq 688(%rsp), %rdi
+ movq 680(%rsp), %rbp
+ addq 648(%rsp), %r14
+ movq 672(%rsp), %rax
+ movq 656(%rsp), %rbx
+ movq 664(%rsp), %r15
+ movq %r14, 8(%r12)
+ adcq 24(%rsp), %rbx ## 8-byte Folded Reload
+ adcq 32(%rsp), %r15 ## 8-byte Folded Reload
+ adcq 40(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, %r14
+ adcq (%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 32(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 40(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, (%rsp) ## 8-byte Spill
+ adcq %r13, %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 16(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %r13 ## 8-byte Reload
+ movq 16(%r13), %rdx
+ leaq 568(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 640(%rsp), %r8
+ movq 632(%rsp), %r9
+ movq 624(%rsp), %r10
+ movq 616(%rsp), %rdi
+ movq 608(%rsp), %rbp
+ movq 600(%rsp), %rcx
+ addq 568(%rsp), %rbx
+ movq 592(%rsp), %rdx
+ movq 576(%rsp), %r12
+ movq 584(%rsp), %rsi
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq %rbx, 16(%rax)
+ adcq %r15, %r12
+ adcq %r14, %rsi
+ movq %rsi, 48(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 40(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 8(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 16(%rsp) ## 8-byte Spill
+ movq 24(%r13), %rdx
+ leaq 488(%rsp), %rdi
+ movq 72(%rsp), %r15 ## 8-byte Reload
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 560(%rsp), %r8
+ movq 552(%rsp), %rcx
+ movq 544(%rsp), %rdx
+ movq 536(%rsp), %rsi
+ movq 528(%rsp), %rdi
+ movq 520(%rsp), %rbp
+ addq 488(%rsp), %r12
+ movq 512(%rsp), %rax
+ movq 496(%rsp), %rbx
+ movq 504(%rsp), %r13
+ movq 64(%rsp), %r14 ## 8-byte Reload
+ movq %r12, 24(%r14)
+ adcq 48(%rsp), %rbx ## 8-byte Folded Reload
+ adcq 56(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 24(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 40(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, (%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 48(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %r12 ## 8-byte Reload
+ movq 32(%r12), %rdx
+ leaq 408(%rsp), %rdi
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 480(%rsp), %r8
+ movq 472(%rsp), %r9
+ movq 464(%rsp), %rdx
+ movq 456(%rsp), %rsi
+ movq 448(%rsp), %rdi
+ movq 440(%rsp), %rbp
+ addq 408(%rsp), %rbx
+ movq 432(%rsp), %rax
+ movq 416(%rsp), %r15
+ movq 424(%rsp), %rcx
+ movq %rbx, 32(%r14)
+ adcq %r13, %r15
+ adcq 24(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 40(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 48(%rsp) ## 8-byte Spill
+ movq %r12, %r14
+ movq 40(%r14), %rdx
+ leaq 328(%rsp), %rdi
+ movq 72(%rsp), %r13 ## 8-byte Reload
+ movq %r13, %rsi
+ callq l_mulPv576x64
+ movq 400(%rsp), %r8
+ movq 392(%rsp), %r9
+ movq 384(%rsp), %rsi
+ movq 376(%rsp), %rdi
+ movq 368(%rsp), %rbx
+ movq 360(%rsp), %rbp
+ addq 328(%rsp), %r15
+ movq 352(%rsp), %rcx
+ movq 336(%rsp), %r12
+ movq 344(%rsp), %rdx
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq %r15, 40(%rax)
+ adcq 56(%rsp), %r12 ## 8-byte Folded Reload
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 40(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 48(%rsp) ## 8-byte Spill
+ movq 48(%r14), %rdx
+ leaq 248(%rsp), %rdi
+ movq %r13, %rsi
+ movq %r13, %r15
+ callq l_mulPv576x64
+ movq 320(%rsp), %r8
+ movq 312(%rsp), %r9
+ movq 304(%rsp), %rsi
+ movq 296(%rsp), %rdi
+ movq 288(%rsp), %rbx
+ movq 280(%rsp), %rbp
+ addq 248(%rsp), %r12
+ movq 272(%rsp), %rcx
+ movq 256(%rsp), %r13
+ movq 264(%rsp), %rdx
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq %r12, 48(%rax)
+ adcq 56(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq (%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 40(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 48(%rsp) ## 8-byte Spill
+ movq 56(%r14), %rdx
+ leaq 168(%rsp), %rdi
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 240(%rsp), %rcx
+ movq 232(%rsp), %rdx
+ movq 224(%rsp), %rsi
+ movq 216(%rsp), %rdi
+ movq 208(%rsp), %rbx
+ addq 168(%rsp), %r13
+ movq 200(%rsp), %r12
+ movq 192(%rsp), %rbp
+ movq 176(%rsp), %r14
+ movq 184(%rsp), %r15
+ movq 64(%rsp), %rax ## 8-byte Reload
+ movq %r13, 56(%rax)
+ adcq 56(%rsp), %r14 ## 8-byte Folded Reload
+ adcq 24(%rsp), %r15 ## 8-byte Folded Reload
+ adcq 32(%rsp), %rbp ## 8-byte Folded Reload
+ adcq 40(%rsp), %r12 ## 8-byte Folded Reload
+ adcq (%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %r13
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, (%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 48(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 88(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 88(%rsp), %r14
+ adcq 96(%rsp), %r15
+ movq 160(%rsp), %r8
+ adcq 104(%rsp), %rbp
+ movq 152(%rsp), %r9
+ movq 144(%rsp), %rdx
+ movq 136(%rsp), %rsi
+ movq 128(%rsp), %rdi
+ movq 120(%rsp), %rbx
+ movq 112(%rsp), %rax
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ movq %r14, 64(%rcx)
+ movq %r15, 72(%rcx)
+ adcq %r12, %rax
+ movq %rbp, 80(%rcx)
+ movq %rax, 88(%rcx)
+ adcq %r13, %rbx
+ movq %rbx, 96(%rcx)
+ adcq (%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 104(%rcx)
+ adcq 8(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 112(%rcx)
+ adcq 16(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 120(%rcx)
+ adcq 48(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 128(%rcx)
+ adcq $0, %r8
+ movq %r8, 136(%rcx)
+ addq $808, %rsp ## imm = 0x328
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sqrPre9L
+ .p2align 4, 0x90
+_mcl_fpDbl_sqrPre9L: ## @mcl_fpDbl_sqrPre9L
+## BB#0:
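+## 9-limb squaring: the same schoolbook loop as mcl_fpDbl_mulPre9L, with the single operand used as both factors.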
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $808, %rsp ## imm = 0x328
+ movq %rsi, %r15
+ movq %rdi, %r14
+ movq (%r15), %rdx
+ leaq 728(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 800(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 792(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 784(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 776(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 768(%rsp), %rax
+ movq %rax, 56(%rsp) ## 8-byte Spill
+ movq 760(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 752(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 744(%rsp), %rax
+ movq %rax, 80(%rsp) ## 8-byte Spill
+ movq 728(%rsp), %rax
+ movq 736(%rsp), %r12
+ movq %rax, (%r14)
+ movq %r14, 72(%rsp) ## 8-byte Spill
+ movq 8(%r15), %rdx
+ leaq 648(%rsp), %rdi
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 720(%rsp), %r8
+ movq 712(%rsp), %rcx
+ movq 704(%rsp), %rdx
+ movq 696(%rsp), %rsi
+ movq 688(%rsp), %rdi
+ movq 680(%rsp), %rbp
+ addq 648(%rsp), %r12
+ movq 672(%rsp), %rax
+ movq 656(%rsp), %rbx
+ movq 664(%rsp), %r13
+ movq %r12, 8(%r14)
+ adcq 80(%rsp), %rbx ## 8-byte Folded Reload
+ adcq 40(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq %r15, 64(%rsp) ## 8-byte Spill
+ movq 16(%r15), %rdx
+ leaq 568(%rsp), %rdi
+ movq %r15, %rsi
+ callq l_mulPv576x64
+ movq 640(%rsp), %r8
+ movq 632(%rsp), %rcx
+ movq 624(%rsp), %rdx
+ movq 616(%rsp), %rsi
+ movq 608(%rsp), %rdi
+ movq 600(%rsp), %rbp
+ addq 568(%rsp), %rbx
+ movq 592(%rsp), %rax
+ movq 576(%rsp), %r14
+ movq 584(%rsp), %r12
+ movq 72(%rsp), %r15 ## 8-byte Reload
+ movq %rbx, 16(%r15)
+ adcq %r13, %r14
+ adcq 40(%rsp), %r12 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 24(%rsi), %rdx
+ leaq 488(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 560(%rsp), %r8
+ movq 552(%rsp), %rcx
+ movq 544(%rsp), %rdx
+ movq 536(%rsp), %rsi
+ movq 528(%rsp), %rdi
+ movq 520(%rsp), %rbp
+ addq 488(%rsp), %r14
+ movq 512(%rsp), %rax
+ movq 496(%rsp), %rbx
+ movq 504(%rsp), %r13
+ movq %r14, 24(%r15)
+ adcq %r12, %rbx
+ adcq 40(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 32(%rsi), %rdx
+ leaq 408(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 480(%rsp), %r8
+ movq 472(%rsp), %rcx
+ movq 464(%rsp), %rdx
+ movq 456(%rsp), %rsi
+ movq 448(%rsp), %rdi
+ movq 440(%rsp), %rbp
+ addq 408(%rsp), %rbx
+ movq 432(%rsp), %rax
+ movq 416(%rsp), %r14
+ movq 424(%rsp), %r12
+ movq %rbx, 32(%r15)
+ adcq %r13, %r14
+ adcq 40(%rsp), %r12 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 40(%rsi), %rdx
+ leaq 328(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 400(%rsp), %r8
+ movq 392(%rsp), %rcx
+ movq 384(%rsp), %rdx
+ movq 376(%rsp), %rsi
+ movq 368(%rsp), %rdi
+ movq 360(%rsp), %rbp
+ addq 328(%rsp), %r14
+ movq 352(%rsp), %rax
+ movq 336(%rsp), %rbx
+ movq 344(%rsp), %r13
+ movq %r14, 40(%r15)
+ adcq %r12, %rbx
+ adcq 40(%rsp), %r13 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 48(%rsi), %rdx
+ leaq 248(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 320(%rsp), %r8
+ movq 312(%rsp), %rcx
+ movq 304(%rsp), %rdx
+ movq 296(%rsp), %rsi
+ movq 288(%rsp), %rdi
+ movq 280(%rsp), %rbp
+ addq 248(%rsp), %rbx
+ movq 272(%rsp), %rax
+ movq 256(%rsp), %r12
+ movq 264(%rsp), %r14
+ movq %rbx, 48(%r15)
+ adcq %r13, %r12
+ adcq 40(%rsp), %r14 ## 8-byte Folded Reload
+ adcq 48(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 56(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%rsp) ## 8-byte Spill
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 56(%rsi), %rdx
+ leaq 168(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 240(%rsp), %r8
+ movq 232(%rsp), %rdx
+ movq 224(%rsp), %rsi
+ movq 216(%rsp), %rdi
+ movq 208(%rsp), %rbx
+ movq 200(%rsp), %rcx
+ addq 168(%rsp), %r12
+ movq 192(%rsp), %r15
+ movq 176(%rsp), %r13
+ movq 184(%rsp), %rbp
+ movq 72(%rsp), %rax ## 8-byte Reload
+ movq %r12, 56(%rax)
+ adcq %r14, %r13
+ adcq 40(%rsp), %rbp ## 8-byte Folded Reload
+ adcq 48(%rsp), %r15 ## 8-byte Folded Reload
+ adcq 56(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, %r12
+ adcq 8(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, %r14
+ adcq 16(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 8(%rsp) ## 8-byte Spill
+ adcq 24(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 16(%rsp) ## 8-byte Spill
+ adcq 32(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 24(%rsp) ## 8-byte Spill
+ adcq $0, %r8
+ movq %r8, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rsi ## 8-byte Reload
+ movq 64(%rsi), %rdx
+ leaq 88(%rsp), %rdi
+ callq l_mulPv576x64
+ addq 88(%rsp), %r13
+ adcq 96(%rsp), %rbp
+ movq 160(%rsp), %r8
+ adcq 104(%rsp), %r15
+ movq 152(%rsp), %r9
+ movq 144(%rsp), %rdx
+ movq 136(%rsp), %rsi
+ movq 128(%rsp), %rdi
+ movq 120(%rsp), %rbx
+ movq 112(%rsp), %rax
+ movq 72(%rsp), %rcx ## 8-byte Reload
+ movq %r13, 64(%rcx)
+ movq %rbp, 72(%rcx)
+ adcq %r12, %rax
+ movq %r15, 80(%rcx)
+ movq %rax, 88(%rcx)
+ adcq %r14, %rbx
+ movq %rbx, 96(%rcx)
+ adcq 8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 104(%rcx)
+ adcq 16(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 112(%rcx)
+ adcq 24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 120(%rcx)
+ adcq 32(%rsp), %r9 ## 8-byte Folded Reload
+ movq %r9, 128(%rcx)
+ adcq $0, %r8
+ movq %r8, 136(%rcx)
+ addq $808, %rsp ## imm = 0x328
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_mont9L
+ .p2align 4, 0x90
+_mcl_fp_mont9L: ## @mcl_fp_mont9L
+## BB#0:
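+## 9-limb Montgomery multiplication: each limb of y is multiplied in and reduced with the constant loaded from -8(%rcx); a final conditional subtraction of p follows.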
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1560, %rsp ## imm = 0x618
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ movq %rdx, 96(%rsp) ## 8-byte Spill
+ movq %rsi, 88(%rsp) ## 8-byte Spill
+ movq %rdi, 112(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 80(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1480(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 1480(%rsp), %r14
+ movq 1488(%rsp), %r15
+ movq %r14, %rdx
+ imulq %rbx, %rdx
+ movq 1552(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 1544(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 1536(%rsp), %rax
+ movq %rax, 56(%rsp) ## 8-byte Spill
+ movq 1528(%rsp), %r12
+ movq 1520(%rsp), %r13
+ movq 1512(%rsp), %rbx
+ movq 1504(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 1496(%rsp), %rbp
+ leaq 1400(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 1400(%rsp), %r14
+ adcq 1408(%rsp), %r15
+ adcq 1416(%rsp), %rbp
+ movq %rbp, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 1424(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 1432(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 1440(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ adcq 1448(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %rbx ## 8-byte Reload
+ adcq 1456(%rsp), %rbx
+ movq 40(%rsp), %r14 ## 8-byte Reload
+ adcq 1464(%rsp), %r14
+ movq 24(%rsp), %r13 ## 8-byte Reload
+ adcq 1472(%rsp), %r13
+ sbbq %rbp, %rbp
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1320(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebp
+ addq 1320(%rsp), %r15
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 1328(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 1336(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r12 ## 8-byte Reload
+ adcq 1344(%rsp), %r12
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 1352(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rax ## 8-byte Reload
+ adcq 1360(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ adcq 1368(%rsp), %rbx
+ adcq 1376(%rsp), %r14
+ movq %r14, 40(%rsp) ## 8-byte Spill
+ adcq 1384(%rsp), %r13
+ movq %r13, 24(%rsp) ## 8-byte Spill
+ adcq 1392(%rsp), %rbp
+ sbbq %r14, %r14
+ movq %r15, %rdx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 1240(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq %r14, %rax
+ andl $1, %eax
+ addq 1240(%rsp), %r15
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 1248(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 1256(%rsp), %r14
+ adcq 1264(%rsp), %r12
+ movq %r12, 32(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 1272(%rsp), %r12
+ movq 48(%rsp), %r13 ## 8-byte Reload
+ adcq 1280(%rsp), %r13
+ adcq 1288(%rsp), %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %r15 ## 8-byte Reload
+ adcq 1296(%rsp), %r15
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 1304(%rsp), %rbx
+ adcq 1312(%rsp), %rbp
+ adcq $0, %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 1160(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 8(%rsp), %rax ## 8-byte Reload
+ addq 1160(%rsp), %rax
+ adcq 1168(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r14 ## 8-byte Reload
+ adcq 1176(%rsp), %r14
+ adcq 1184(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ movq %r13, %r12
+ adcq 1192(%rsp), %r12
+ movq 56(%rsp), %rcx ## 8-byte Reload
+ adcq 1200(%rsp), %rcx
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ adcq 1208(%rsp), %r15
+ movq %r15, %r13
+ adcq 1216(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ adcq 1224(%rsp), %rbp
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 1232(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ sbbq %r15, %r15
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 1080(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq %r15, %rax
+ andl $1, %eax
+ addq 1080(%rsp), %rbx
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 1088(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq %r14, %r15
+ adcq 1096(%rsp), %r15
+ movq 16(%rsp), %r14 ## 8-byte Reload
+ adcq 1104(%rsp), %r14
+ movq %r12, %rbx
+ adcq 1112(%rsp), %rbx
+ movq 56(%rsp), %rcx ## 8-byte Reload
+ adcq 1120(%rsp), %rcx
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ adcq 1128(%rsp), %r13
+ movq %r13, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r13 ## 8-byte Reload
+ adcq 1136(%rsp), %r13
+ adcq 1144(%rsp), %rbp
+ movq 64(%rsp), %r12 ## 8-byte Reload
+ adcq 1152(%rsp), %r12
+ adcq $0, %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 1000(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq (%rsp), %rax ## 8-byte Reload
+ addq 1000(%rsp), %rax
+ adcq 1008(%rsp), %r15
+ movq %r15, 32(%rsp) ## 8-byte Spill
+ adcq 1016(%rsp), %r14
+ movq %r14, %r15
+ adcq 1024(%rsp), %rbx
+ movq %rbx, 48(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %r14 ## 8-byte Reload
+ adcq 1032(%rsp), %r14
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 1040(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ adcq 1048(%rsp), %r13
+ movq %r13, 24(%rsp) ## 8-byte Spill
+ adcq 1056(%rsp), %rbp
+ adcq 1064(%rsp), %r12
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 1072(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 920(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 920(%rsp), %r13
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 928(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ adcq 936(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r15 ## 8-byte Reload
+ adcq 944(%rsp), %r15
+ movq %r14, %r13
+ adcq 952(%rsp), %r13
+ movq 40(%rsp), %r14 ## 8-byte Reload
+ adcq 960(%rsp), %r14
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 968(%rsp), %rbx
+ adcq 976(%rsp), %rbp
+ adcq 984(%rsp), %r12
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 992(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 840(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 32(%rsp), %rax ## 8-byte Reload
+ addq 840(%rsp), %rax
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 848(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq 856(%rsp), %r15
+ adcq 864(%rsp), %r13
+ movq %r13, 56(%rsp) ## 8-byte Spill
+ adcq 872(%rsp), %r14
+ movq %r14, 40(%rsp) ## 8-byte Spill
+ adcq 880(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ adcq 888(%rsp), %rbp
+ adcq 896(%rsp), %r12
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq 904(%rsp), %r13
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 912(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 760(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 760(%rsp), %r14
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 768(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq 776(%rsp), %r15
+ movq 56(%rsp), %r14 ## 8-byte Reload
+ adcq 784(%rsp), %r14
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 792(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 800(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 808(%rsp), %rbp
+ movq %r12, %rbx
+ adcq 816(%rsp), %rbx
+ movq %r13, %r12
+ adcq 824(%rsp), %r12
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 832(%rsp), %r13
+ adcq $0, %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 680(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 16(%rsp), %rax ## 8-byte Reload
+ addq 680(%rsp), %rax
+ adcq 688(%rsp), %r15
+ movq %r15, 48(%rsp) ## 8-byte Spill
+ adcq 696(%rsp), %r14
+ movq %r14, 56(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 704(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r15 ## 8-byte Reload
+ adcq 712(%rsp), %r15
+ adcq 720(%rsp), %rbp
+ adcq 728(%rsp), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ adcq 736(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ adcq 744(%rsp), %r13
+ movq %r13, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r13 ## 8-byte Reload
+ adcq 752(%rsp), %r13
+ sbbq %r14, %r14
+ movq %rax, %rdx
+ movq %rax, %rbx
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 600(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %r14d
+ addq 600(%rsp), %rbx
+ movq 48(%rsp), %rax ## 8-byte Reload
+ adcq 608(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %rax ## 8-byte Reload
+ adcq 616(%rsp), %rax
+ movq %rax, 56(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rbx ## 8-byte Reload
+ adcq 624(%rsp), %rbx
+ adcq 632(%rsp), %r15
+ movq %r15, 24(%rsp) ## 8-byte Spill
+ adcq 640(%rsp), %rbp
+ movq 64(%rsp), %r12 ## 8-byte Reload
+ adcq 648(%rsp), %r12
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 656(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r15 ## 8-byte Reload
+ adcq 664(%rsp), %r15
+ adcq 672(%rsp), %r13
+ adcq $0, %r14
+ movq %r14, 16(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 520(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 48(%rsp), %rax ## 8-byte Reload
+ addq 520(%rsp), %rax
+ movq 56(%rsp), %r14 ## 8-byte Reload
+ adcq 528(%rsp), %r14
+ adcq 536(%rsp), %rbx
+ movq %rbx, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 544(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 552(%rsp), %rbp
+ adcq 560(%rsp), %r12
+ movq %r12, 64(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r12 ## 8-byte Reload
+ adcq 568(%rsp), %r12
+ adcq 576(%rsp), %r15
+ movq %r15, (%rsp) ## 8-byte Spill
+ adcq 584(%rsp), %r13
+ movq %r13, 32(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r15 ## 8-byte Reload
+ adcq 592(%rsp), %r15
+ sbbq %rbx, %rbx
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 440(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebx
+ movq %rbx, %rax
+ addq 440(%rsp), %r13
+ adcq 448(%rsp), %r14
+ movq %r14, 56(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %r14 ## 8-byte Reload
+ adcq 456(%rsp), %r14
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 464(%rsp), %rbx
+ adcq 472(%rsp), %rbp
+ movq %rbp, 104(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ adcq 488(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 496(%rsp), %rbp
+ movq 32(%rsp), %r12 ## 8-byte Reload
+ adcq 504(%rsp), %r12
+ adcq 512(%rsp), %r15
+ movq %r15, %r13
+ adcq $0, %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 360(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 56(%rsp), %rax ## 8-byte Reload
+ addq 360(%rsp), %rax
+ adcq 368(%rsp), %r14
+ adcq 376(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ movq 104(%rsp), %rcx ## 8-byte Reload
+ adcq 384(%rsp), %rcx
+ movq %rcx, 104(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rbx ## 8-byte Reload
+ adcq 392(%rsp), %rbx
+ movq 8(%rsp), %r15 ## 8-byte Reload
+ adcq 400(%rsp), %r15
+ adcq 408(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 416(%rsp), %r12
+ movq %r12, %rbp
+ adcq 424(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ sbbq %r13, %r13
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 80(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 280(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %r13d
+ addq 280(%rsp), %r12
+ adcq 288(%rsp), %r14
+ movq %r14, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 296(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 104(%rsp), %r14 ## 8-byte Reload
+ adcq 304(%rsp), %r14
+ adcq 312(%rsp), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ adcq 320(%rsp), %r15
+ movq %r15, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %rbx ## 8-byte Reload
+ adcq 328(%rsp), %rbx
+ adcq 336(%rsp), %rbp
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r12 ## 8-byte Reload
+ adcq 344(%rsp), %r12
+ movq 48(%rsp), %rbp ## 8-byte Reload
+ adcq 352(%rsp), %rbp
+ adcq $0, %r13
+ movq 96(%rsp), %rax ## 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 200(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 40(%rsp), %rax ## 8-byte Reload
+ addq 200(%rsp), %rax
+ movq 24(%rsp), %r15 ## 8-byte Reload
+ adcq 208(%rsp), %r15
+ adcq 216(%rsp), %r14
+ movq %r14, 104(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %r14 ## 8-byte Reload
+ adcq 224(%rsp), %r14
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 232(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ adcq 240(%rsp), %rbx
+ movq %rbx, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 248(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ adcq 256(%rsp), %r12
+ movq %r12, 16(%rsp) ## 8-byte Spill
+ adcq 264(%rsp), %rbp
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 272(%rsp), %r13
+ sbbq %rbx, %rbx
+ movq 80(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %r12
+ leaq 120(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %ebx
+ addq 120(%rsp), %r12
+ adcq 128(%rsp), %r15
+ movq 104(%rsp), %rbp ## 8-byte Reload
+ adcq 136(%rsp), %rbp
+ movq %r14, %rcx
+ adcq 144(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r8 ## 8-byte Reload
+ adcq 152(%rsp), %r8
+ movq %r8, 8(%rsp) ## 8-byte Spill
+ movq (%rsp), %r9 ## 8-byte Reload
+ adcq 160(%rsp), %r9
+ movq %r9, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r10 ## 8-byte Reload
+ adcq 168(%rsp), %r10
+ movq %r10, 32(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rdi ## 8-byte Reload
+ adcq 176(%rsp), %rdi
+ movq %rdi, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r14 ## 8-byte Reload
+ adcq 184(%rsp), %r14
+ adcq 192(%rsp), %r13
+ adcq $0, %rbx
+ movq %r15, %rsi
+ movq %r15, %r12
+ movq 72(%rsp), %rdx ## 8-byte Reload
+ subq (%rdx), %rsi
+ movq %rbp, %rax
+ movq %rbp, %r15
+ sbbq 8(%rdx), %rax
+ movq %rcx, %rbp
+ sbbq 16(%rdx), %rbp
+ movq %r8, %rcx
+ sbbq 24(%rdx), %rcx
+ movq %r9, %r8
+ sbbq 32(%rdx), %r8
+ movq %r10, %r11
+ sbbq 40(%rdx), %r11
+ movq %rdi, %r10
+ sbbq 48(%rdx), %r10
+ movq %r14, %rdi
+ sbbq 56(%rdx), %rdi
+ movq %r13, %r9
+ sbbq 64(%rdx), %r9
+ sbbq $0, %rbx
+ andl $1, %ebx
+ cmovneq %r13, %r9
+ testb %bl, %bl
+ cmovneq %r12, %rsi
+ movq 112(%rsp), %rbx ## 8-byte Reload
+ movq %rsi, (%rbx)
+ cmovneq %r15, %rax
+ movq %rax, 8(%rbx)
+ cmovneq 64(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 16(%rbx)
+ cmovneq 8(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 24(%rbx)
+ cmovneq (%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 32(%rbx)
+ cmovneq 32(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 40(%rbx)
+ cmovneq 16(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 48(%rbx)
+ cmovneq %r14, %rdi
+ movq %rdi, 56(%rbx)
+ movq %r9, 64(%rbx)
+ addq $1560, %rsp ## imm = 0x618
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montNF9L
+ .p2align 4, 0x90
+_mcl_fp_montNF9L: ## @mcl_fp_montNF9L
+## BB#0:
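+## Variant of mcl_fp_mont9L ("NF"): the same interleaved multiply/reduce loop, but the loop carry is carried through the tenth product limb instead of an explicit carry word.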
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $1560, %rsp ## imm = 0x618
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ movq %rdx, 80(%rsp) ## 8-byte Spill
+ movq %rsi, 88(%rsp) ## 8-byte Spill
+ movq %rdi, 112(%rsp) ## 8-byte Spill
+ movq -8(%rcx), %rbx
+ movq %rbx, 96(%rsp) ## 8-byte Spill
+ movq (%rdx), %rdx
+ leaq 1480(%rsp), %rdi
+ callq l_mulPv576x64
+ movq 1480(%rsp), %r12
+ movq 1488(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq %r12, %rdx
+ imulq %rbx, %rdx
+ movq 1552(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ movq 1544(%rsp), %r13
+ movq 1536(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ movq 1528(%rsp), %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ movq 1520(%rsp), %r14
+ movq 1512(%rsp), %r15
+ movq 1504(%rsp), %rbx
+ movq 1496(%rsp), %rbp
+ leaq 1400(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 1400(%rsp), %r12
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 1408(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ adcq 1416(%rsp), %rbp
+ movq %rbp, 104(%rsp) ## 8-byte Spill
+ adcq 1424(%rsp), %rbx
+ movq %rbx, (%rsp) ## 8-byte Spill
+ adcq 1432(%rsp), %r15
+ movq %r15, 8(%rsp) ## 8-byte Spill
+ adcq 1440(%rsp), %r14
+ movq %r14, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rbx ## 8-byte Reload
+ adcq 1448(%rsp), %rbx
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 1456(%rsp), %r12
+ adcq 1464(%rsp), %r13
+ movq %r13, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 1472(%rsp), %rbp
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 8(%rax), %rdx
+ leaq 1320(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 1392(%rsp), %rax
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ addq 1320(%rsp), %rcx
+ movq 104(%rsp), %r15 ## 8-byte Reload
+ adcq 1328(%rsp), %r15
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 1336(%rsp), %r14
+ movq 8(%rsp), %rdx ## 8-byte Reload
+ adcq 1344(%rsp), %rdx
+ movq %rdx, 8(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %r13 ## 8-byte Reload
+ adcq 1352(%rsp), %r13
+ adcq 1360(%rsp), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ adcq 1368(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rdx ## 8-byte Reload
+ adcq 1376(%rsp), %rdx
+ movq %rdx, 40(%rsp) ## 8-byte Spill
+ adcq 1384(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, %rbp
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 1240(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 1240(%rsp), %rbx
+ adcq 1248(%rsp), %r15
+ movq %r15, 104(%rsp) ## 8-byte Spill
+ adcq 1256(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r12 ## 8-byte Reload
+ adcq 1264(%rsp), %r12
+ adcq 1272(%rsp), %r13
+ movq %r13, %r14
+ movq 64(%rsp), %r13 ## 8-byte Reload
+ adcq 1280(%rsp), %r13
+ movq 48(%rsp), %rbx ## 8-byte Reload
+ adcq 1288(%rsp), %rbx
+ movq 40(%rsp), %r15 ## 8-byte Reload
+ adcq 1296(%rsp), %r15
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 1304(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 1312(%rsp), %rbp
+ movq %rbp, 56(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 16(%rax), %rdx
+ leaq 1160(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 1232(%rsp), %rax
+ movq 104(%rsp), %rcx ## 8-byte Reload
+ addq 1160(%rsp), %rcx
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 1168(%rsp), %rbp
+ adcq 1176(%rsp), %r12
+ movq %r12, 8(%rsp) ## 8-byte Spill
+ adcq 1184(%rsp), %r14
+ adcq 1192(%rsp), %r13
+ movq %r13, %r12
+ adcq 1200(%rsp), %rbx
+ movq %rbx, 48(%rsp) ## 8-byte Spill
+ adcq 1208(%rsp), %r15
+ movq %r15, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbx ## 8-byte Reload
+ adcq 1216(%rsp), %rbx
+ movq 56(%rsp), %rdx ## 8-byte Reload
+ adcq 1224(%rsp), %rdx
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ movq %rax, %r15
+ adcq $0, %r15
+ movq %rcx, %rdx
+ movq %rcx, %r13
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 1080(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 1080(%rsp), %r13
+ adcq 1088(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq 1096(%rsp), %r13
+ adcq 1104(%rsp), %r14
+ adcq 1112(%rsp), %r12
+ movq %r12, 64(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 1120(%rsp), %r12
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 1128(%rsp), %rbp
+ adcq 1136(%rsp), %rbx
+ movq %rbx, 24(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %rbx ## 8-byte Reload
+ adcq 1144(%rsp), %rbx
+ adcq 1152(%rsp), %r15
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 24(%rax), %rdx
+ leaq 1000(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 1072(%rsp), %rax
+ movq (%rsp), %rcx ## 8-byte Reload
+ addq 1000(%rsp), %rcx
+ adcq 1008(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ adcq 1016(%rsp), %r14
+ movq %r14, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %r14 ## 8-byte Reload
+ adcq 1024(%rsp), %r14
+ adcq 1032(%rsp), %r12
+ adcq 1040(%rsp), %rbp
+ movq %rbp, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %r13 ## 8-byte Reload
+ adcq 1048(%rsp), %r13
+ adcq 1056(%rsp), %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ adcq 1064(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 920(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 920(%rsp), %rbx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 928(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rbp ## 8-byte Reload
+ adcq 936(%rsp), %rbp
+ movq %r14, %rbx
+ adcq 944(%rsp), %rbx
+ adcq 952(%rsp), %r12
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 960(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 968(%rsp), %r13
+ movq %r13, %r15
+ movq 56(%rsp), %r13 ## 8-byte Reload
+ adcq 976(%rsp), %r13
+ movq 16(%rsp), %r14 ## 8-byte Reload
+ adcq 984(%rsp), %r14
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 992(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 32(%rax), %rdx
+ leaq 840(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 912(%rsp), %rax
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ addq 840(%rsp), %rcx
+ adcq 848(%rsp), %rbp
+ movq %rbp, 32(%rsp) ## 8-byte Spill
+ adcq 856(%rsp), %rbx
+ movq %rbx, 64(%rsp) ## 8-byte Spill
+ adcq 864(%rsp), %r12
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 872(%rsp), %rbp
+ adcq 880(%rsp), %r15
+ movq %r15, 24(%rsp) ## 8-byte Spill
+ adcq 888(%rsp), %r13
+ adcq 896(%rsp), %r14
+ movq %r14, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %rdx ## 8-byte Reload
+ adcq 904(%rsp), %rdx
+ movq %rdx, (%rsp) ## 8-byte Spill
+ adcq $0, %rax
+ movq %rax, %r14
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 760(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 760(%rsp), %rbx
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 768(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %r15 ## 8-byte Reload
+ adcq 776(%rsp), %r15
+ adcq 784(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ movq %rbp, %rbx
+ adcq 792(%rsp), %rbx
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 800(%rsp), %rbp
+ adcq 808(%rsp), %r13
+ movq 16(%rsp), %rax ## 8-byte Reload
+ adcq 816(%rsp), %rax
+ movq %rax, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %r12 ## 8-byte Reload
+ adcq 824(%rsp), %r12
+ adcq 832(%rsp), %r14
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 40(%rax), %rdx
+ leaq 680(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 752(%rsp), %rcx
+ movq 32(%rsp), %rax ## 8-byte Reload
+ addq 680(%rsp), %rax
+ adcq 688(%rsp), %r15
+ movq %r15, 64(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rdx ## 8-byte Reload
+ adcq 696(%rsp), %rdx
+ movq %rdx, 48(%rsp) ## 8-byte Spill
+ adcq 704(%rsp), %rbx
+ movq %rbx, 40(%rsp) ## 8-byte Spill
+ adcq 712(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ adcq 720(%rsp), %r13
+ movq %r13, %r15
+ movq 16(%rsp), %rbx ## 8-byte Reload
+ adcq 728(%rsp), %rbx
+ adcq 736(%rsp), %r12
+ movq %r12, (%rsp) ## 8-byte Spill
+ adcq 744(%rsp), %r14
+ movq %r14, 32(%rsp) ## 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r13
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 600(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 600(%rsp), %r13
+ movq 64(%rsp), %r13 ## 8-byte Reload
+ adcq 608(%rsp), %r13
+ movq 48(%rsp), %r12 ## 8-byte Reload
+ adcq 616(%rsp), %r12
+ movq 40(%rsp), %rbp ## 8-byte Reload
+ adcq 624(%rsp), %rbp
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 632(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 640(%rsp), %r15
+ movq %r15, 56(%rsp) ## 8-byte Spill
+ adcq 648(%rsp), %rbx
+ movq %rbx, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 656(%rsp), %r14
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 664(%rsp), %rbx
+ movq 8(%rsp), %r15 ## 8-byte Reload
+ adcq 672(%rsp), %r15
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 48(%rax), %rdx
+ leaq 520(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 592(%rsp), %rcx
+ movq %r13, %rax
+ addq 520(%rsp), %rax
+ adcq 528(%rsp), %r12
+ movq %r12, 48(%rsp) ## 8-byte Spill
+ movq %rbp, %r12
+ adcq 536(%rsp), %r12
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 544(%rsp), %rbp
+ movq 56(%rsp), %rdx ## 8-byte Reload
+ adcq 552(%rsp), %rdx
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rdx ## 8-byte Reload
+ adcq 560(%rsp), %rdx
+ movq %rdx, 16(%rsp) ## 8-byte Spill
+ adcq 568(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ adcq 576(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ adcq 584(%rsp), %r15
+ movq %r15, 8(%rsp) ## 8-byte Spill
+ adcq $0, %rcx
+ movq %rcx, %r13
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 440(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 440(%rsp), %r14
+ movq 48(%rsp), %rax ## 8-byte Reload
+ adcq 448(%rsp), %rax
+ movq %rax, 48(%rsp) ## 8-byte Spill
+ adcq 456(%rsp), %r12
+ adcq 464(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %r14 ## 8-byte Reload
+ adcq 472(%rsp), %r14
+ movq 16(%rsp), %r15 ## 8-byte Reload
+ adcq 480(%rsp), %r15
+ movq (%rsp), %rbp ## 8-byte Reload
+ adcq 488(%rsp), %rbp
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 496(%rsp), %rbx
+ movq 8(%rsp), %rax ## 8-byte Reload
+ adcq 504(%rsp), %rax
+ movq %rax, 8(%rsp) ## 8-byte Spill
+ adcq 512(%rsp), %r13
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 56(%rax), %rdx
+ leaq 360(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 432(%rsp), %rcx
+ movq 48(%rsp), %rax ## 8-byte Reload
+ addq 360(%rsp), %rax
+ adcq 368(%rsp), %r12
+ movq %r12, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rdx ## 8-byte Reload
+ adcq 376(%rsp), %rdx
+ movq %rdx, 24(%rsp) ## 8-byte Spill
+ adcq 384(%rsp), %r14
+ movq %r14, 56(%rsp) ## 8-byte Spill
+ adcq 392(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ adcq 400(%rsp), %rbp
+ movq %rbp, (%rsp) ## 8-byte Spill
+ adcq 408(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r14 ## 8-byte Reload
+ adcq 416(%rsp), %r14
+ adcq 424(%rsp), %r13
+ movq %r13, %r15
+ adcq $0, %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq %rax, %rdx
+ movq %rax, %r12
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 280(%rsp), %rdi
+ movq 72(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 280(%rsp), %r12
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 288(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 296(%rsp), %rbp
+ movq 56(%rsp), %rax ## 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq %rax, 56(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r13 ## 8-byte Reload
+ adcq 312(%rsp), %r13
+ movq (%rsp), %r12 ## 8-byte Reload
+ adcq 320(%rsp), %r12
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 328(%rsp), %rbx
+ adcq 336(%rsp), %r14
+ movq %r14, 8(%rsp) ## 8-byte Spill
+ adcq 344(%rsp), %r15
+ movq %r15, 64(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %r14 ## 8-byte Reload
+ adcq 352(%rsp), %r14
+ movq 80(%rsp), %rax ## 8-byte Reload
+ movq 64(%rax), %rdx
+ leaq 200(%rsp), %rdi
+ movq 88(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ movq 272(%rsp), %rcx
+ movq 40(%rsp), %rax ## 8-byte Reload
+ addq 200(%rsp), %rax
+ adcq 208(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 56(%rsp), %rbp ## 8-byte Reload
+ adcq 216(%rsp), %rbp
+ adcq 224(%rsp), %r13
+ movq %r13, 16(%rsp) ## 8-byte Spill
+ adcq 232(%rsp), %r12
+ movq %r12, (%rsp) ## 8-byte Spill
+ adcq 240(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r15 ## 8-byte Reload
+ adcq 248(%rsp), %r15
+ movq 64(%rsp), %r12 ## 8-byte Reload
+ adcq 256(%rsp), %r12
+ adcq 264(%rsp), %r14
+ adcq $0, %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 96(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbx
+ leaq 120(%rsp), %rdi
+ movq 72(%rsp), %r13 ## 8-byte Reload
+ movq %r13, %rsi
+ callq l_mulPv576x64
+ addq 120(%rsp), %rbx
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 128(%rsp), %rcx
+ movq %rbp, %rdx
+ adcq 136(%rsp), %rdx
+ movq 16(%rsp), %rsi ## 8-byte Reload
+ adcq 144(%rsp), %rsi
+ movq %rsi, 16(%rsp) ## 8-byte Spill
+ movq (%rsp), %rdi ## 8-byte Reload
+ adcq 152(%rsp), %rdi
+ movq %rdi, (%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rbx ## 8-byte Reload
+ adcq 160(%rsp), %rbx
+ movq %rbx, 32(%rsp) ## 8-byte Spill
+ movq %r15, %r8
+ adcq 168(%rsp), %r8
+ movq %r8, 8(%rsp) ## 8-byte Spill
+ movq %r12, %r15
+ adcq 176(%rsp), %r15
+ adcq 184(%rsp), %r14
+ movq 40(%rsp), %r9 ## 8-byte Reload
+ adcq 192(%rsp), %r9
+ movq %rcx, %rax
+ movq %rcx, %r11
+ movq %r13, %rbp
+ subq (%rbp), %rax
+ movq %rdx, %rcx
+ movq %rdx, %r12
+ sbbq 8(%rbp), %rcx
+ movq %rsi, %rdx
+ sbbq 16(%rbp), %rdx
+ movq %rdi, %rsi
+ sbbq 24(%rbp), %rsi
+ movq %rbx, %rdi
+ sbbq 32(%rbp), %rdi
+ movq %r8, %r10
+ sbbq 40(%rbp), %r10
+ movq %r15, %r13
+ sbbq 48(%rbp), %r13
+ movq %r14, %r8
+ sbbq 56(%rbp), %r8
+ movq %rbp, %rbx
+ movq %r9, %rbp
+ sbbq 64(%rbx), %rbp
+ movq %rbp, %rbx
+ sarq $63, %rbx
+ cmovsq %r11, %rax
+ movq 112(%rsp), %rbx ## 8-byte Reload
+ movq %rax, (%rbx)
+ cmovsq %r12, %rcx
+ movq %rcx, 8(%rbx)
+ cmovsq 16(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 16(%rbx)
+ cmovsq (%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 24(%rbx)
+ cmovsq 32(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 32(%rbx)
+ cmovsq 8(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 40(%rbx)
+ cmovsq %r15, %r13
+ movq %r13, 48(%rbx)
+ cmovsq %r14, %r8
+ movq %r8, 56(%rbx)
+ cmovsq %r9, %rbp
+ movq %rbp, 64(%rbx)
+ addq $1560, %rsp ## imm = 0x618
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_montRed9L
+ .p2align 4, 0x90
+_mcl_fp_montRed9L: ## @mcl_fp_montRed9L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ subq $936, %rsp ## imm = 0x3A8
+ movq %rdx, %rax
+ movq %rdi, 208(%rsp) ## 8-byte Spill
+ movq -8(%rax), %rcx
+ movq %rcx, 96(%rsp) ## 8-byte Spill
+ movq (%rsi), %r14
+ movq 8(%rsi), %rdx
+ movq %rdx, (%rsp) ## 8-byte Spill
+ movq %r14, %rdx
+ imulq %rcx, %rdx
+ movq 136(%rsi), %rcx
+ movq %rcx, 88(%rsp) ## 8-byte Spill
+ movq 128(%rsi), %rcx
+ movq %rcx, 56(%rsp) ## 8-byte Spill
+ movq 120(%rsi), %rcx
+ movq %rcx, 80(%rsp) ## 8-byte Spill
+ movq 112(%rsi), %rcx
+ movq %rcx, 72(%rsp) ## 8-byte Spill
+ movq 104(%rsi), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq 96(%rsi), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 88(%rsi), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 80(%rsi), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ movq 72(%rsi), %r12
+ movq 64(%rsi), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq 56(%rsi), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq 48(%rsi), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 40(%rsi), %rbp
+ movq 32(%rsi), %rbx
+ movq 24(%rsi), %r13
+ movq 16(%rsi), %r15
+ movq %rax, %rcx
+ movq (%rcx), %rax
+ movq %rax, 144(%rsp) ## 8-byte Spill
+ movq 64(%rcx), %rax
+ movq %rax, 200(%rsp) ## 8-byte Spill
+ movq 56(%rcx), %rax
+ movq %rax, 192(%rsp) ## 8-byte Spill
+ movq 48(%rcx), %rax
+ movq %rax, 184(%rsp) ## 8-byte Spill
+ movq 40(%rcx), %rax
+ movq %rax, 176(%rsp) ## 8-byte Spill
+ movq 32(%rcx), %rax
+ movq %rax, 168(%rsp) ## 8-byte Spill
+ movq 24(%rcx), %rax
+ movq %rax, 160(%rsp) ## 8-byte Spill
+ movq 16(%rcx), %rax
+ movq %rax, 152(%rsp) ## 8-byte Spill
+ movq 8(%rcx), %rax
+ movq %rax, 136(%rsp) ## 8-byte Spill
+ movq %rcx, %rsi
+ movq %rsi, 104(%rsp) ## 8-byte Spill
+ leaq 856(%rsp), %rdi
+ callq l_mulPv576x64
+ addq 856(%rsp), %r14
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 864(%rsp), %rcx
+ adcq 872(%rsp), %r15
+ adcq 880(%rsp), %r13
+ adcq 888(%rsp), %rbx
+ movq %rbx, 120(%rsp) ## 8-byte Spill
+ adcq 896(%rsp), %rbp
+ movq %rbp, 112(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ adcq 904(%rsp), %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 912(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 920(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ adcq 928(%rsp), %r12
+ movq %r12, (%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq $0, %rbp
+ adcq $0, 8(%rsp) ## 8-byte Folded Spill
+ adcq $0, 16(%rsp) ## 8-byte Folded Spill
+ adcq $0, 48(%rsp) ## 8-byte Folded Spill
+ adcq $0, 72(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ adcq $0, 56(%rsp) ## 8-byte Folded Spill
+ movq 88(%rsp), %r14 ## 8-byte Reload
+ adcq $0, %r14
+ sbbq %r12, %r12
+ movq %rcx, %rdx
+ movq %rcx, %rbx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 776(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ andl $1, %r12d
+ addq 776(%rsp), %rbx
+ adcq 784(%rsp), %r15
+ adcq 792(%rsp), %r13
+ movq %r13, 128(%rsp) ## 8-byte Spill
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 800(%rsp), %rax
+ movq %rax, 120(%rsp) ## 8-byte Spill
+ movq 112(%rsp), %rax ## 8-byte Reload
+ adcq 808(%rsp), %rax
+ movq %rax, 112(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rax ## 8-byte Reload
+ adcq 816(%rsp), %rax
+ movq %rax, 64(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 824(%rsp), %rax
+ movq %rax, 32(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 832(%rsp), %rax
+ movq %rax, 40(%rsp) ## 8-byte Spill
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 840(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ adcq 848(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq $0, %r13
+ adcq $0, 16(%rsp) ## 8-byte Folded Spill
+ adcq $0, 48(%rsp) ## 8-byte Folded Spill
+ adcq $0, 72(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ movq 56(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, %r14
+ movq %r14, 88(%rsp) ## 8-byte Spill
+ adcq $0, %r12
+ movq %r15, %rdx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 696(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 696(%rsp), %r15
+ movq 128(%rsp), %rcx ## 8-byte Reload
+ adcq 704(%rsp), %rcx
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 712(%rsp), %rax
+ movq %rax, 120(%rsp) ## 8-byte Spill
+ movq 112(%rsp), %rax ## 8-byte Reload
+ adcq 720(%rsp), %rax
+ movq %rax, 112(%rsp) ## 8-byte Spill
+ movq 64(%rsp), %rbp ## 8-byte Reload
+ adcq 728(%rsp), %rbp
+ movq 32(%rsp), %r14 ## 8-byte Reload
+ adcq 736(%rsp), %r14
+ movq 40(%rsp), %r15 ## 8-byte Reload
+ adcq 744(%rsp), %r15
+ movq (%rsp), %rax ## 8-byte Reload
+ adcq 752(%rsp), %rax
+ movq %rax, (%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rax ## 8-byte Reload
+ adcq 760(%rsp), %rax
+ movq %rax, 24(%rsp) ## 8-byte Spill
+ adcq 768(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ adcq $0, 16(%rsp) ## 8-byte Folded Spill
+ movq 48(%rsp), %r13 ## 8-byte Reload
+ adcq $0, %r13
+ adcq $0, 72(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ adcq $0, %rbx
+ movq %rbx, 56(%rsp) ## 8-byte Spill
+ adcq $0, 88(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rcx, %rbx
+ movq %rbx, %rdx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 616(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 616(%rsp), %rbx
+ movq 120(%rsp), %rax ## 8-byte Reload
+ adcq 624(%rsp), %rax
+ movq 112(%rsp), %rcx ## 8-byte Reload
+ adcq 632(%rsp), %rcx
+ movq %rcx, 112(%rsp) ## 8-byte Spill
+ adcq 640(%rsp), %rbp
+ movq %rbp, 64(%rsp) ## 8-byte Spill
+ adcq 648(%rsp), %r14
+ movq %r14, 32(%rsp) ## 8-byte Spill
+ adcq 656(%rsp), %r15
+ movq (%rsp), %r14 ## 8-byte Reload
+ adcq 664(%rsp), %r14
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 672(%rsp), %rbp
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 680(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 688(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ adcq $0, %r13
+ movq %r13, 48(%rsp) ## 8-byte Spill
+ adcq $0, 72(%rsp) ## 8-byte Folded Spill
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ adcq $0, 56(%rsp) ## 8-byte Folded Spill
+ adcq $0, 88(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 536(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 536(%rsp), %rbx
+ movq 112(%rsp), %rax ## 8-byte Reload
+ adcq 544(%rsp), %rax
+ movq 64(%rsp), %rcx ## 8-byte Reload
+ adcq 552(%rsp), %rcx
+ movq %rcx, 64(%rsp) ## 8-byte Spill
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 560(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ adcq 568(%rsp), %r15
+ movq %r15, 40(%rsp) ## 8-byte Spill
+ adcq 576(%rsp), %r14
+ movq %r14, (%rsp) ## 8-byte Spill
+ adcq 584(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r13 ## 8-byte Reload
+ adcq 592(%rsp), %r13
+ movq 16(%rsp), %r15 ## 8-byte Reload
+ adcq 600(%rsp), %r15
+ movq 48(%rsp), %rbp ## 8-byte Reload
+ adcq 608(%rsp), %rbp
+ movq 72(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, 80(%rsp) ## 8-byte Folded Spill
+ adcq $0, 56(%rsp) ## 8-byte Folded Spill
+ adcq $0, 88(%rsp) ## 8-byte Folded Spill
+ adcq $0, %r12
+ movq %rax, %rdx
+ movq %rax, %r14
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 456(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 456(%rsp), %r14
+ movq 64(%rsp), %rax ## 8-byte Reload
+ adcq 464(%rsp), %rax
+ movq 32(%rsp), %rcx ## 8-byte Reload
+ adcq 472(%rsp), %rcx
+ movq %rcx, 32(%rsp) ## 8-byte Spill
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 480(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 488(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rcx ## 8-byte Reload
+ adcq 496(%rsp), %rcx
+ movq %rcx, 24(%rsp) ## 8-byte Spill
+ adcq 504(%rsp), %r13
+ movq %r13, 8(%rsp) ## 8-byte Spill
+ adcq 512(%rsp), %r15
+ movq %r15, 16(%rsp) ## 8-byte Spill
+ adcq 520(%rsp), %rbp
+ movq %rbp, 48(%rsp) ## 8-byte Spill
+ adcq 528(%rsp), %rbx
+ movq %rbx, 72(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %r14 ## 8-byte Reload
+ adcq $0, %r14
+ movq 56(%rsp), %r13 ## 8-byte Reload
+ adcq $0, %r13
+ movq 88(%rsp), %rbx ## 8-byte Reload
+ adcq $0, %rbx
+ adcq $0, %r12
+ movq %rax, %rdx
+ movq %rax, %r15
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 376(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 376(%rsp), %r15
+ movq 32(%rsp), %rax ## 8-byte Reload
+ adcq 384(%rsp), %rax
+ movq 40(%rsp), %rcx ## 8-byte Reload
+ adcq 392(%rsp), %rcx
+ movq %rcx, 40(%rsp) ## 8-byte Spill
+ movq (%rsp), %rcx ## 8-byte Reload
+ adcq 400(%rsp), %rcx
+ movq %rcx, (%rsp) ## 8-byte Spill
+ movq 24(%rsp), %rbp ## 8-byte Reload
+ adcq 408(%rsp), %rbp
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 416(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 424(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 432(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ movq 72(%rsp), %r15 ## 8-byte Reload
+ adcq 440(%rsp), %r15
+ adcq 448(%rsp), %r14
+ movq %r14, 80(%rsp) ## 8-byte Spill
+ adcq $0, %r13
+ movq %r13, %r14
+ adcq $0, %rbx
+ movq %rbx, 88(%rsp) ## 8-byte Spill
+ adcq $0, %r12
+ movq %rax, %rbx
+ movq %rbx, %rdx
+ imulq 96(%rsp), %rdx ## 8-byte Folded Reload
+ leaq 296(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 296(%rsp), %rbx
+ movq 40(%rsp), %rax ## 8-byte Reload
+ adcq 304(%rsp), %rax
+ movq (%rsp), %r13 ## 8-byte Reload
+ adcq 312(%rsp), %r13
+ adcq 320(%rsp), %rbp
+ movq 8(%rsp), %rcx ## 8-byte Reload
+ adcq 328(%rsp), %rcx
+ movq %rcx, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %rcx ## 8-byte Reload
+ adcq 336(%rsp), %rcx
+ movq %rcx, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rcx ## 8-byte Reload
+ adcq 344(%rsp), %rcx
+ movq %rcx, 48(%rsp) ## 8-byte Spill
+ adcq 352(%rsp), %r15
+ movq %r15, 72(%rsp) ## 8-byte Spill
+ movq 80(%rsp), %r15 ## 8-byte Reload
+ adcq 360(%rsp), %r15
+ adcq 368(%rsp), %r14
+ movq %r14, 56(%rsp) ## 8-byte Spill
+ movq 88(%rsp), %r14 ## 8-byte Reload
+ adcq $0, %r14
+ adcq $0, %r12
+ movq 96(%rsp), %rdx ## 8-byte Reload
+ imulq %rax, %rdx
+ movq %rax, %rbx
+ leaq 216(%rsp), %rdi
+ movq 104(%rsp), %rsi ## 8-byte Reload
+ callq l_mulPv576x64
+ addq 216(%rsp), %rbx
+ movq %r13, %rsi
+ adcq 224(%rsp), %rsi
+ movq %rsi, (%rsp) ## 8-byte Spill
+ adcq 232(%rsp), %rbp
+ movq %rbp, 24(%rsp) ## 8-byte Spill
+ movq 8(%rsp), %r9 ## 8-byte Reload
+ adcq 240(%rsp), %r9
+ movq %r9, 8(%rsp) ## 8-byte Spill
+ movq 16(%rsp), %r8 ## 8-byte Reload
+ adcq 248(%rsp), %r8
+ movq %r8, 16(%rsp) ## 8-byte Spill
+ movq 48(%rsp), %rbx ## 8-byte Reload
+ adcq 256(%rsp), %rbx
+ movq 72(%rsp), %rax ## 8-byte Reload
+ adcq 264(%rsp), %rax
+ movq %r15, %rcx
+ adcq 272(%rsp), %rcx
+ movq 56(%rsp), %rdx ## 8-byte Reload
+ adcq 280(%rsp), %rdx
+ movq %rdx, 56(%rsp) ## 8-byte Spill
+ adcq 288(%rsp), %r14
+ movq %r14, %r11
+ adcq $0, %r12
+ subq 144(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rbp, %rdi
+ sbbq 136(%rsp), %rdi ## 8-byte Folded Reload
+ movq %r9, %rbp
+ sbbq 152(%rsp), %rbp ## 8-byte Folded Reload
+ movq %r8, %r13
+ sbbq 160(%rsp), %r13 ## 8-byte Folded Reload
+ movq %rbx, %r15
+ sbbq 168(%rsp), %r15 ## 8-byte Folded Reload
+ movq %rax, %r14
+ sbbq 176(%rsp), %r14 ## 8-byte Folded Reload
+ movq %rcx, %r10
+ sbbq 184(%rsp), %r10 ## 8-byte Folded Reload
+ movq %rdx, %r8
+ sbbq 192(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r11, %r9
+ sbbq 200(%rsp), %r9 ## 8-byte Folded Reload
+ sbbq $0, %r12
+ andl $1, %r12d
+ cmovneq %r11, %r9
+ testb %r12b, %r12b
+ cmovneq (%rsp), %rsi ## 8-byte Folded Reload
+ movq 208(%rsp), %rdx ## 8-byte Reload
+ movq %rsi, (%rdx)
+ cmovneq 24(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 8(%rdx)
+ cmovneq 8(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 16(%rdx)
+ cmovneq 16(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 24(%rdx)
+ cmovneq %rbx, %r15
+ movq %r15, 32(%rdx)
+ cmovneq %rax, %r14
+ movq %r14, 40(%rdx)
+ cmovneq %rcx, %r10
+ movq %r10, 48(%rdx)
+ cmovneq 56(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 56(%rdx)
+ movq %r9, 64(%rdx)
+ addq $936, %rsp ## imm = 0x3A8
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_addPre9L
+ .p2align 4, 0x90
+_mcl_fp_addPre9L: ## @mcl_fp_addPre9L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r8
+ movq 64(%rsi), %r15
+ movq 56(%rsi), %r9
+ movq 48(%rsi), %r10
+ movq 40(%rsi), %r11
+ movq 24(%rsi), %r12
+ movq 32(%rsi), %r14
+ movq (%rdx), %rbx
+ movq 8(%rdx), %rcx
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %rcx
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r12
+ movq 56(%rdx), %r13
+ movq 48(%rdx), %rsi
+ movq 40(%rdx), %rbp
+ movq 32(%rdx), %rdx
+ movq %rbx, (%rdi)
+ movq %rcx, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r12, 24(%rdi)
+ adcq %r14, %rdx
+ movq %rdx, 32(%rdi)
+ adcq %r11, %rbp
+ movq %rbp, 40(%rdi)
+ adcq %r10, %rsi
+ movq %rsi, 48(%rdi)
+ adcq %r9, %r13
+ movq %r13, 56(%rdi)
+ adcq %r8, %r15
+ movq %r15, 64(%rdi)
+ sbbq %rax, %rax
+ andl $1, %eax
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_subPre9L
+ .p2align 4, 0x90
+_mcl_fp_subPre9L: ## @mcl_fp_subPre9L
+## BB#0:
+ movq 32(%rdx), %r8
+ movq (%rsi), %rcx
+ xorl %eax, %eax
+ subq (%rdx), %rcx
+ movq %rcx, (%rdi)
+ movq 8(%rsi), %rcx
+ sbbq 8(%rdx), %rcx
+ movq %rcx, 8(%rdi)
+ movq 16(%rsi), %rcx
+ sbbq 16(%rdx), %rcx
+ movq %rcx, 16(%rdi)
+ movq 24(%rsi), %rcx
+ sbbq 24(%rdx), %rcx
+ movq %rcx, 24(%rdi)
+ movq 32(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 40(%rdx), %r8
+ movq %rcx, 32(%rdi)
+ movq 40(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 48(%rdx), %r8
+ movq %rcx, 40(%rdi)
+ movq 48(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq 56(%rdx), %r8
+ movq %rcx, 48(%rdi)
+ movq 56(%rsi), %rcx
+ sbbq %r8, %rcx
+ movq %rcx, 56(%rdi)
+ movq 64(%rdx), %rcx
+ movq 64(%rsi), %rdx
+ sbbq %rcx, %rdx
+ movq %rdx, 64(%rdi)
+ sbbq $0, %rax
+ andl $1, %eax
+ retq
+
+ .globl _mcl_fp_shr1_9L
+ .p2align 4, 0x90
+_mcl_fp_shr1_9L: ## @mcl_fp_shr1_9L
+## BB#0:
+ pushq %rbx
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %r9
+ movq 48(%rsi), %r10
+ movq 40(%rsi), %r11
+ movq 32(%rsi), %rcx
+ movq 24(%rsi), %rdx
+ movq 16(%rsi), %rax
+ movq (%rsi), %rbx
+ movq 8(%rsi), %rsi
+ shrdq $1, %rsi, %rbx
+ movq %rbx, (%rdi)
+ shrdq $1, %rax, %rsi
+ movq %rsi, 8(%rdi)
+ shrdq $1, %rdx, %rax
+ movq %rax, 16(%rdi)
+ shrdq $1, %rcx, %rdx
+ movq %rdx, 24(%rdi)
+ shrdq $1, %r11, %rcx
+ movq %rcx, 32(%rdi)
+ shrdq $1, %r10, %r11
+ movq %r11, 40(%rdi)
+ shrdq $1, %r9, %r10
+ movq %r10, 48(%rdi)
+ shrdq $1, %r8, %r9
+ movq %r9, 56(%rdi)
+ shrq %r8
+ movq %r8, 64(%rdi)
+ popq %rbx
+ retq
+
+ .globl _mcl_fp_add9L
+ .p2align 4, 0x90
+_mcl_fp_add9L: ## @mcl_fp_add9L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r12
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %r13
+ movq 48(%rsi), %r9
+ movq 40(%rsi), %r10
+ movq 24(%rsi), %r14
+ movq 32(%rsi), %r11
+ movq (%rdx), %rbx
+ movq 8(%rdx), %r15
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %r15
+ movq 16(%rdx), %rax
+ adcq 16(%rsi), %rax
+ adcq 24(%rdx), %r14
+ adcq 32(%rdx), %r11
+ adcq 40(%rdx), %r10
+ movq 56(%rdx), %rsi
+ adcq 48(%rdx), %r9
+ movq %rbx, (%rdi)
+ movq %r15, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r14, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ adcq %r13, %rsi
+ movq %rsi, 56(%rdi)
+ adcq %r12, %r8
+ movq %r8, 64(%rdi)
+ sbbq %rdx, %rdx
+ andl $1, %edx
+ subq (%rcx), %rbx
+ sbbq 8(%rcx), %r15
+ sbbq 16(%rcx), %rax
+ sbbq 24(%rcx), %r14
+ sbbq 32(%rcx), %r11
+ sbbq 40(%rcx), %r10
+ sbbq 48(%rcx), %r9
+ sbbq 56(%rcx), %rsi
+ sbbq 64(%rcx), %r8
+ sbbq $0, %rdx
+ testb $1, %dl
+ jne LBB136_2
+## BB#1: ## %nocarry
+ movq %rbx, (%rdi)
+ movq %r15, 8(%rdi)
+ movq %rax, 16(%rdi)
+ movq %r14, 24(%rdi)
+ movq %r11, 32(%rdi)
+ movq %r10, 40(%rdi)
+ movq %r9, 48(%rdi)
+ movq %rsi, 56(%rdi)
+ movq %r8, 64(%rdi)
+LBB136_2: ## %carry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_addNF9L
+ .p2align 4, 0x90
+_mcl_fp_addNF9L: ## @mcl_fp_addNF9L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rdi, %r8
+ movq 64(%rdx), %r10
+ movq 56(%rdx), %r11
+ movq 48(%rdx), %r9
+ movq 40(%rdx), %rax
+ movq 32(%rdx), %rdi
+ movq 24(%rdx), %rbp
+ movq 16(%rdx), %r15
+ movq (%rdx), %rbx
+ movq 8(%rdx), %r13
+ addq (%rsi), %rbx
+ adcq 8(%rsi), %r13
+ adcq 16(%rsi), %r15
+ adcq 24(%rsi), %rbp
+ movq %rbp, -24(%rsp) ## 8-byte Spill
+ adcq 32(%rsi), %rdi
+ movq %rdi, -40(%rsp) ## 8-byte Spill
+ adcq 40(%rsi), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ adcq 48(%rsi), %r9
+ movq %r9, %rdi
+ movq %rdi, -16(%rsp) ## 8-byte Spill
+ adcq 56(%rsi), %r11
+ movq %r11, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ adcq 64(%rsi), %r10
+ movq %r10, %r9
+ movq %rbx, %rsi
+ subq (%rcx), %rsi
+ movq %r13, %rdx
+ sbbq 8(%rcx), %rdx
+ movq %r15, %r12
+ sbbq 16(%rcx), %r12
+ sbbq 24(%rcx), %rbp
+ movq -40(%rsp), %r14 ## 8-byte Reload
+ sbbq 32(%rcx), %r14
+ movq -32(%rsp), %r11 ## 8-byte Reload
+ sbbq 40(%rcx), %r11
+ movq %rdi, %r10
+ sbbq 48(%rcx), %r10
+ movq %rax, %rdi
+ sbbq 56(%rcx), %rdi
+ movq %r9, %rax
+ sbbq 64(%rcx), %rax
+ movq %rax, %rcx
+ sarq $63, %rcx
+ cmovsq %rbx, %rsi
+ movq %rsi, (%r8)
+ cmovsq %r13, %rdx
+ movq %rdx, 8(%r8)
+ cmovsq %r15, %r12
+ movq %r12, 16(%r8)
+ cmovsq -24(%rsp), %rbp ## 8-byte Folded Reload
+ movq %rbp, 24(%r8)
+ cmovsq -40(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, 32(%r8)
+ cmovsq -32(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 40(%r8)
+ cmovsq -16(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 48(%r8)
+ cmovsq -8(%rsp), %rdi ## 8-byte Folded Reload
+ movq %rdi, 56(%r8)
+ cmovsq %r9, %rax
+ movq %rax, 64(%r8)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fp_sub9L
+ .p2align 4, 0x90
+_mcl_fp_sub9L: ## @mcl_fp_sub9L
+## BB#0:
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq 64(%rdx), %r13
+ movq (%rsi), %rax
+ movq 8(%rsi), %r9
+ xorl %ebx, %ebx
+ subq (%rdx), %rax
+ sbbq 8(%rdx), %r9
+ movq 16(%rsi), %r10
+ sbbq 16(%rdx), %r10
+ movq 24(%rsi), %r11
+ sbbq 24(%rdx), %r11
+ movq 32(%rsi), %r12
+ sbbq 32(%rdx), %r12
+ movq 40(%rsi), %r14
+ sbbq 40(%rdx), %r14
+ movq 48(%rsi), %r15
+ sbbq 48(%rdx), %r15
+ movq 64(%rsi), %r8
+ movq 56(%rsi), %rsi
+ sbbq 56(%rdx), %rsi
+ movq %rax, (%rdi)
+ movq %r9, 8(%rdi)
+ movq %r10, 16(%rdi)
+ movq %r11, 24(%rdi)
+ movq %r12, 32(%rdi)
+ movq %r14, 40(%rdi)
+ movq %r15, 48(%rdi)
+ movq %rsi, 56(%rdi)
+ sbbq %r13, %r8
+ movq %r8, 64(%rdi)
+ sbbq $0, %rbx
+ testb $1, %bl
+ je LBB138_2
+## BB#1: ## %carry
+ addq (%rcx), %rax
+ movq %rax, (%rdi)
+ movq 8(%rcx), %rax
+ adcq %r9, %rax
+ movq %rax, 8(%rdi)
+ movq 16(%rcx), %rax
+ adcq %r10, %rax
+ movq %rax, 16(%rdi)
+ movq 24(%rcx), %rax
+ adcq %r11, %rax
+ movq %rax, 24(%rdi)
+ movq 32(%rcx), %rax
+ adcq %r12, %rax
+ movq %rax, 32(%rdi)
+ movq 40(%rcx), %rax
+ adcq %r14, %rax
+ movq %rax, 40(%rdi)
+ movq 48(%rcx), %rax
+ adcq %r15, %rax
+ movq %rax, 48(%rdi)
+ movq 56(%rcx), %rax
+ adcq %rsi, %rax
+ movq %rax, 56(%rdi)
+ movq 64(%rcx), %rax
+ adcq %r8, %rax
+ movq %rax, 64(%rdi)
+LBB138_2: ## %nocarry
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ retq
+
+ .globl _mcl_fp_subNF9L
+ .p2align 4, 0x90
+_mcl_fp_subNF9L: ## @mcl_fp_subNF9L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r11
+ movq %rdi, %rbx
+ movq 64(%rsi), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movdqu (%rdx), %xmm1
+ movdqu 16(%rdx), %xmm2
+ movdqu 32(%rdx), %xmm3
+ movdqu 48(%rdx), %xmm4
+ pshufd $78, %xmm4, %xmm0 ## xmm0 = xmm4[2,3,0,1]
+ movd %xmm0, %r12
+ movdqu (%rsi), %xmm5
+ movdqu 16(%rsi), %xmm6
+ movdqu 32(%rsi), %xmm7
+ movdqu 48(%rsi), %xmm8
+ pshufd $78, %xmm8, %xmm0 ## xmm0 = xmm8[2,3,0,1]
+ movd %xmm0, %rax
+ movd %xmm4, %r10
+ pshufd $78, %xmm3, %xmm0 ## xmm0 = xmm3[2,3,0,1]
+ movd %xmm0, %r9
+ pshufd $78, %xmm7, %xmm0 ## xmm0 = xmm7[2,3,0,1]
+ movd %xmm3, %r8
+ pshufd $78, %xmm2, %xmm3 ## xmm3 = xmm2[2,3,0,1]
+ movd %xmm3, %rcx
+ pshufd $78, %xmm6, %xmm3 ## xmm3 = xmm6[2,3,0,1]
+ movd %xmm2, %rbp
+ pshufd $78, %xmm1, %xmm2 ## xmm2 = xmm1[2,3,0,1]
+ movd %xmm2, %rsi
+ pshufd $78, %xmm5, %xmm2 ## xmm2 = xmm5[2,3,0,1]
+ movd %xmm1, %rdi
+ movd %xmm5, %r15
+ subq %rdi, %r15
+ movd %xmm2, %r14
+ sbbq %rsi, %r14
+ movd %xmm6, %r13
+ sbbq %rbp, %r13
+ movd %xmm3, %rbp
+ sbbq %rcx, %rbp
+ movd %xmm7, %rcx
+ sbbq %r8, %rcx
+ movq %rcx, -16(%rsp) ## 8-byte Spill
+ movd %xmm0, %rcx
+ sbbq %r9, %rcx
+ movq %rcx, -24(%rsp) ## 8-byte Spill
+ movd %xmm8, %rcx
+ sbbq %r10, %rcx
+ movq %rcx, -32(%rsp) ## 8-byte Spill
+ sbbq %r12, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq -40(%rsp), %rsi ## 8-byte Reload
+ sbbq 64(%rdx), %rsi
+ movq %rsi, -40(%rsp) ## 8-byte Spill
+ movq %rsi, %rax
+ sarq $63, %rax
+ movq %rax, %rcx
+ shldq $1, %rsi, %rcx
+ movq 24(%r11), %r9
+ andq %rcx, %r9
+ movq 8(%r11), %rdi
+ andq %rcx, %rdi
+ andq (%r11), %rcx
+ movq 64(%r11), %r12
+ andq %rax, %r12
+ movq 56(%r11), %r10
+ andq %rax, %r10
+ rolq %rax
+ movq 48(%r11), %r8
+ andq %rax, %r8
+ movq 40(%r11), %rsi
+ andq %rax, %rsi
+ movq 32(%r11), %rdx
+ andq %rax, %rdx
+ andq 16(%r11), %rax
+ addq %r15, %rcx
+ adcq %r14, %rdi
+ movq %rcx, (%rbx)
+ adcq %r13, %rax
+ movq %rdi, 8(%rbx)
+ adcq %rbp, %r9
+ movq %rax, 16(%rbx)
+ movq %r9, 24(%rbx)
+ adcq -16(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, 32(%rbx)
+ adcq -24(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 40(%rbx)
+ adcq -32(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 48(%rbx)
+ adcq -8(%rsp), %r10 ## 8-byte Folded Reload
+ movq %r10, 56(%rbx)
+ adcq -40(%rsp), %r12 ## 8-byte Folded Reload
+ movq %r12, 64(%rbx)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_add9L
+ .p2align 4, 0x90
+_mcl_fpDbl_add9L: ## @mcl_fpDbl_add9L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r15
+ movq 136(%rdx), %rax
+ movq %rax, -48(%rsp) ## 8-byte Spill
+ movq 128(%rdx), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq 120(%rdx), %r10
+ movq 112(%rdx), %r11
+ movq 24(%rsi), %rcx
+ movq 32(%rsi), %r14
+ movq 16(%rdx), %rbp
+ movq (%rdx), %rax
+ movq 8(%rdx), %rbx
+ addq (%rsi), %rax
+ adcq 8(%rsi), %rbx
+ adcq 16(%rsi), %rbp
+ adcq 24(%rdx), %rcx
+ adcq 32(%rdx), %r14
+ movq 104(%rdx), %r9
+ movq 96(%rdx), %r13
+ movq %rax, (%rdi)
+ movq 88(%rdx), %r8
+ movq %rbx, 8(%rdi)
+ movq 80(%rdx), %r12
+ movq %rbp, 16(%rdi)
+ movq 40(%rdx), %rax
+ movq %rcx, 24(%rdi)
+ movq 40(%rsi), %rbp
+ adcq %rax, %rbp
+ movq 48(%rdx), %rcx
+ movq %r14, 32(%rdi)
+ movq 48(%rsi), %rax
+ adcq %rcx, %rax
+ movq 56(%rdx), %r14
+ movq %rbp, 40(%rdi)
+ movq 56(%rsi), %rbp
+ adcq %r14, %rbp
+ movq 72(%rdx), %rcx
+ movq 64(%rdx), %rdx
+ movq %rax, 48(%rdi)
+ movq 64(%rsi), %rax
+ adcq %rdx, %rax
+ movq 136(%rsi), %rbx
+ movq %rbp, 56(%rdi)
+ movq 72(%rsi), %rbp
+ adcq %rcx, %rbp
+ movq 128(%rsi), %rcx
+ movq %rax, 64(%rdi)
+ movq 80(%rsi), %rdx
+ adcq %r12, %rdx
+ movq 88(%rsi), %r12
+ adcq %r8, %r12
+ movq 96(%rsi), %r14
+ adcq %r13, %r14
+ movq %r14, -8(%rsp) ## 8-byte Spill
+ movq 104(%rsi), %rax
+ adcq %r9, %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq 120(%rsi), %rax
+ movq 112(%rsi), %rsi
+ adcq %r11, %rsi
+ movq %rsi, -24(%rsp) ## 8-byte Spill
+ adcq %r10, %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ adcq -40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -40(%rsp) ## 8-byte Spill
+ adcq -48(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, -48(%rsp) ## 8-byte Spill
+ sbbq %r9, %r9
+ andl $1, %r9d
+ movq %rbp, %r10
+ subq (%r15), %r10
+ movq %rdx, %r11
+ sbbq 8(%r15), %r11
+ movq %r12, %rbx
+ sbbq 16(%r15), %rbx
+ sbbq 24(%r15), %r14
+ movq -32(%rsp), %r13 ## 8-byte Reload
+ sbbq 32(%r15), %r13
+ movq -24(%rsp), %rsi ## 8-byte Reload
+ sbbq 40(%r15), %rsi
+ movq -16(%rsp), %rax ## 8-byte Reload
+ sbbq 48(%r15), %rax
+ sbbq 56(%r15), %rcx
+ movq -48(%rsp), %r8 ## 8-byte Reload
+ sbbq 64(%r15), %r8
+ sbbq $0, %r9
+ andl $1, %r9d
+ cmovneq %rbp, %r10
+ movq %r10, 72(%rdi)
+ testb %r9b, %r9b
+ cmovneq %rdx, %r11
+ movq %r11, 80(%rdi)
+ cmovneq %r12, %rbx
+ movq %rbx, 88(%rdi)
+ cmovneq -8(%rsp), %r14 ## 8-byte Folded Reload
+ movq %r14, 96(%rdi)
+ cmovneq -32(%rsp), %r13 ## 8-byte Folded Reload
+ movq %r13, 104(%rdi)
+ cmovneq -24(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 112(%rdi)
+ cmovneq -16(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 120(%rdi)
+ cmovneq -40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 128(%rdi)
+ cmovneq -48(%rsp), %r8 ## 8-byte Folded Reload
+ movq %r8, 136(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+ .globl _mcl_fpDbl_sub9L
+ .p2align 4, 0x90
+_mcl_fpDbl_sub9L: ## @mcl_fpDbl_sub9L
+## BB#0:
+ pushq %rbp
+ pushq %r15
+ pushq %r14
+ pushq %r13
+ pushq %r12
+ pushq %rbx
+ movq %rcx, %r14
+ movq 136(%rdx), %rax
+ movq %rax, -24(%rsp) ## 8-byte Spill
+ movq 128(%rdx), %rax
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ movq 120(%rdx), %rax
+ movq %rax, -40(%rsp) ## 8-byte Spill
+ movq 16(%rsi), %r11
+ movq (%rsi), %r12
+ movq 8(%rsi), %r13
+ xorl %r9d, %r9d
+ subq (%rdx), %r12
+ sbbq 8(%rdx), %r13
+ sbbq 16(%rdx), %r11
+ movq 24(%rsi), %rbx
+ sbbq 24(%rdx), %rbx
+ movq 32(%rsi), %rbp
+ sbbq 32(%rdx), %rbp
+ movq 112(%rdx), %r10
+ movq 104(%rdx), %rcx
+ movq %r12, (%rdi)
+ movq 96(%rdx), %rax
+ movq %r13, 8(%rdi)
+ movq 88(%rdx), %r13
+ movq %r11, 16(%rdi)
+ movq 40(%rdx), %r11
+ movq %rbx, 24(%rdi)
+ movq 40(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 48(%rdx), %r11
+ movq %rbp, 32(%rdi)
+ movq 48(%rsi), %rbp
+ sbbq %r11, %rbp
+ movq 56(%rdx), %r11
+ movq %rbx, 40(%rdi)
+ movq 56(%rsi), %rbx
+ sbbq %r11, %rbx
+ movq 64(%rdx), %r11
+ movq %rbp, 48(%rdi)
+ movq 64(%rsi), %rbp
+ sbbq %r11, %rbp
+ movq 80(%rdx), %r8
+ movq 72(%rdx), %r11
+ movq %rbx, 56(%rdi)
+ movq 72(%rsi), %r15
+ sbbq %r11, %r15
+ movq 136(%rsi), %rdx
+ movq %rbp, 64(%rdi)
+ movq 80(%rsi), %rbp
+ sbbq %r8, %rbp
+ movq 88(%rsi), %r12
+ sbbq %r13, %r12
+ movq 96(%rsi), %r13
+ sbbq %rax, %r13
+ movq 104(%rsi), %rax
+ sbbq %rcx, %rax
+ movq %rax, -16(%rsp) ## 8-byte Spill
+ movq 112(%rsi), %rax
+ sbbq %r10, %rax
+ movq %rax, -8(%rsp) ## 8-byte Spill
+ movq 128(%rsi), %rax
+ movq 120(%rsi), %rcx
+ sbbq -40(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, -40(%rsp) ## 8-byte Spill
+ sbbq -32(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, -32(%rsp) ## 8-byte Spill
+ sbbq -24(%rsp), %rdx ## 8-byte Folded Reload
+ movq %rdx, -24(%rsp) ## 8-byte Spill
+ movl $0, %r8d
+ sbbq $0, %r8
+ andl $1, %r8d
+ movq (%r14), %r10
+ cmoveq %r9, %r10
+ testb %r8b, %r8b
+ movq 16(%r14), %r8
+ cmoveq %r9, %r8
+ movq 8(%r14), %rdx
+ cmoveq %r9, %rdx
+ movq 64(%r14), %rbx
+ cmoveq %r9, %rbx
+ movq 56(%r14), %r11
+ cmoveq %r9, %r11
+ movq 48(%r14), %rsi
+ cmoveq %r9, %rsi
+ movq 40(%r14), %rcx
+ cmoveq %r9, %rcx
+ movq 32(%r14), %rax
+ cmoveq %r9, %rax
+ cmovneq 24(%r14), %r9
+ addq %r15, %r10
+ adcq %rbp, %rdx
+ movq %r10, 72(%rdi)
+ adcq %r12, %r8
+ movq %rdx, 80(%rdi)
+ adcq %r13, %r9
+ movq %r8, 88(%rdi)
+ movq %r9, 96(%rdi)
+ adcq -16(%rsp), %rax ## 8-byte Folded Reload
+ movq %rax, 104(%rdi)
+ adcq -8(%rsp), %rcx ## 8-byte Folded Reload
+ movq %rcx, 112(%rdi)
+ adcq -40(%rsp), %rsi ## 8-byte Folded Reload
+ movq %rsi, 120(%rdi)
+ adcq -32(%rsp), %r11 ## 8-byte Folded Reload
+ movq %r11, 128(%rdi)
+ adcq -24(%rsp), %rbx ## 8-byte Folded Reload
+ movq %rbx, 136(%rdi)
+ popq %rbx
+ popq %r12
+ popq %r13
+ popq %r14
+ popq %r15
+ popq %rbp
+ retq
+
+
+.subsections_via_symbols
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/x86.bmi2.s b/vendor/github.com/tangerine-network/mcl/src/asm/x86.bmi2.s
new file mode 100644
index 000000000..77729c530
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/x86.bmi2.s
@@ -0,0 +1,71547 @@
+ .text
+ .file "<stdin>"
+ .globl makeNIST_P192Lbmi2
+ .align 16, 0x90
+ .type makeNIST_P192Lbmi2,@function
+makeNIST_P192Lbmi2: # @makeNIST_P192Lbmi2
+# BB#0:
+ movl 4(%esp), %eax
+ movl $-1, 20(%eax)
+ movl $-1, 16(%eax)
+ movl $-1, 12(%eax)
+ movl $-2, 8(%eax)
+ movl $-1, 4(%eax)
+ movl $-1, (%eax)
+ retl $4
+.Lfunc_end0:
+ .size makeNIST_P192Lbmi2, .Lfunc_end0-makeNIST_P192Lbmi2
+
+ .globl mcl_fpDbl_mod_NIST_P192Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mod_NIST_P192Lbmi2,@function
+mcl_fpDbl_mod_NIST_P192Lbmi2: # @mcl_fpDbl_mod_NIST_P192Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %eax
+ movl 32(%eax), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ xorl %edx, %edx
+ movl (%eax), %ebx
+ addl %ecx, %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 4(%eax), %ecx
+ adcl %edi, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 8(%eax), %ebp
+ adcl %esi, %ebp
+ movl 36(%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 12(%eax), %esi
+ adcl %ecx, %esi
+ movl 40(%eax), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 16(%eax), %ecx
+ adcl %ebx, %ecx
+ movl 44(%eax), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl 20(%eax), %eax
+ adcl %edi, %eax
+ adcl $0, %edx
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl %ebx, 24(%esp) # 4-byte Folded Spill
+ movl (%esp), %ebx # 4-byte Reload
+ adcl %ebx, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %edx
+ adcl $0, %edi
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ adcl $0, %eax
+ adcl $0, %edx
+ adcl $0, %edi
+ addl %edx, 24(%esp) # 4-byte Folded Spill
+ adcl %edi, 28(%esp) # 4-byte Folded Spill
+ adcl %ebp, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ adcl $0, %ecx
+ adcl $0, %eax
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 24(%esp), %esi # 4-byte Reload
+ addl $1, %esi
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $1, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %edi, %edx
+ adcl $0, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ adcl $0, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %edx
+ adcl $0, %edx
+ adcl $-1, %ebx
+ andl $1, %ebx
+ jne .LBB1_2
+# BB#1:
+ movl %edx, %eax
+.LBB1_2:
+ testb %bl, %bl
+ movl 24(%esp), %edx # 4-byte Reload
+ jne .LBB1_4
+# BB#3:
+ movl %esi, %edx
+.LBB1_4:
+ movl 52(%esp), %esi
+ movl %edx, (%esi)
+ movl 20(%esp), %edx # 4-byte Reload
+ movl 28(%esp), %ebx # 4-byte Reload
+ jne .LBB1_6
+# BB#5:
+ movl %ebp, %ebx
+.LBB1_6:
+ movl %ebx, 4(%esi)
+ jne .LBB1_8
+# BB#7:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB1_8:
+ movl %edx, 8(%esi)
+ jne .LBB1_10
+# BB#9:
+ movl 12(%esp), %edi # 4-byte Reload
+.LBB1_10:
+ movl %edi, 12(%esi)
+ jne .LBB1_12
+# BB#11:
+ movl 16(%esp), %ecx # 4-byte Reload
+.LBB1_12:
+ movl %ecx, 16(%esi)
+ movl %eax, 20(%esi)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end1:
+ .size mcl_fpDbl_mod_NIST_P192Lbmi2, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192Lbmi2
+
+ .globl mcl_fp_sqr_NIST_P192Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sqr_NIST_P192Lbmi2,@function
+mcl_fp_sqr_NIST_P192Lbmi2: # @mcl_fp_sqr_NIST_P192Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+ calll .L2$pb
+.L2$pb:
+ popl %ebx
+.Ltmp0:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L2$pb), %ebx
+ movl 116(%esp), %eax
+ movl %eax, 4(%esp)
+ leal 44(%esp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_sqrPre6Lbmi2@PLT
+ xorl %edi, %edi
+ movl 76(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 72(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %esi
+ addl %eax, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax
+ adcl %edx, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp
+ adcl %ecx, %ebp
+ movl 80(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %esi
+ adcl %eax, %esi
+ movl 84(%esp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx
+ adcl %ebx, %ecx
+ movl 88(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %edx
+ adcl %eax, %edx
+ adcl $0, %edi
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %ebx, 36(%esp) # 4-byte Folded Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 40(%esp) # 4-byte Folded Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %edi
+ adcl $0, %eax
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ adcl $0, %edx
+ adcl $0, %edi
+ adcl $0, %eax
+ addl %edi, 36(%esp) # 4-byte Folded Spill
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl %ebp, %edi
+ adcl %esi, %eax
+ adcl $0, %ecx
+ adcl $0, %edx
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 36(%esp), %esi # 4-byte Reload
+ addl $1, %esi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ adcl $1, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl $0, %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ adcl $0, %ebp
+ adcl $-1, %ebx
+ andl $1, %ebx
+ jne .LBB2_2
+# BB#1:
+ movl %ebp, %edx
+.LBB2_2:
+ testb %bl, %bl
+ movl 36(%esp), %ebx # 4-byte Reload
+ jne .LBB2_4
+# BB#3:
+ movl %esi, %ebx
+.LBB2_4:
+ movl 112(%esp), %esi
+ movl %ebx, (%esi)
+ movl 40(%esp), %ebx # 4-byte Reload
+ jne .LBB2_6
+# BB#5:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB2_6:
+ movl %ebx, 4(%esi)
+ jne .LBB2_8
+# BB#7:
+ movl 24(%esp), %edi # 4-byte Reload
+.LBB2_8:
+ movl %edi, 8(%esi)
+ jne .LBB2_10
+# BB#9:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB2_10:
+ movl %eax, 12(%esi)
+ jne .LBB2_12
+# BB#11:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB2_12:
+ movl %ecx, 16(%esi)
+ movl %edx, 20(%esi)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end2:
+ .size mcl_fp_sqr_NIST_P192Lbmi2, .Lfunc_end2-mcl_fp_sqr_NIST_P192Lbmi2
+
+ .globl mcl_fp_mulNIST_P192Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulNIST_P192Lbmi2,@function
+mcl_fp_mulNIST_P192Lbmi2: # @mcl_fp_mulNIST_P192Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+ calll .L3$pb
+.L3$pb:
+ popl %ebx
+.Ltmp1:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.L3$pb), %ebx
+ movl 120(%esp), %eax
+ movl %eax, 8(%esp)
+ movl 116(%esp), %eax
+ movl %eax, 4(%esp)
+ leal 44(%esp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6Lbmi2@PLT
+ xorl %edi, %edi
+ movl 76(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 72(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %esi
+ addl %eax, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax
+ adcl %edx, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp
+ adcl %ecx, %ebp
+ movl 80(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %esi
+ adcl %eax, %esi
+ movl 84(%esp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx
+ adcl %ebx, %ecx
+ movl 88(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %edx
+ adcl %eax, %edx
+ adcl $0, %edi
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %ebx, 36(%esp) # 4-byte Folded Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 40(%esp) # 4-byte Folded Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %edi
+ adcl $0, %eax
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ adcl $0, %edx
+ adcl $0, %edi
+ adcl $0, %eax
+ addl %edi, 36(%esp) # 4-byte Folded Spill
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl %ebp, %edi
+ adcl %esi, %eax
+ adcl $0, %ecx
+ adcl $0, %edx
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 36(%esp), %esi # 4-byte Reload
+ addl $1, %esi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ adcl $1, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl $0, %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ adcl $0, %ebp
+ adcl $-1, %ebx
+ andl $1, %ebx
+ jne .LBB3_2
+# BB#1:
+ movl %ebp, %edx
+.LBB3_2:
+ testb %bl, %bl
+ movl 36(%esp), %ebx # 4-byte Reload
+ jne .LBB3_4
+# BB#3:
+ movl %esi, %ebx
+.LBB3_4:
+ movl 112(%esp), %esi
+ movl %ebx, (%esi)
+ movl 40(%esp), %ebx # 4-byte Reload
+ jne .LBB3_6
+# BB#5:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB3_6:
+ movl %ebx, 4(%esi)
+ jne .LBB3_8
+# BB#7:
+ movl 24(%esp), %edi # 4-byte Reload
+.LBB3_8:
+ movl %edi, 8(%esi)
+ jne .LBB3_10
+# BB#9:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB3_10:
+ movl %eax, 12(%esi)
+ jne .LBB3_12
+# BB#11:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB3_12:
+ movl %ecx, 16(%esi)
+ movl %edx, 20(%esi)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end3:
+ .size mcl_fp_mulNIST_P192Lbmi2, .Lfunc_end3-mcl_fp_mulNIST_P192Lbmi2
+
+ .globl mcl_fpDbl_mod_NIST_P521Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mod_NIST_P521Lbmi2,@function
+mcl_fpDbl_mod_NIST_P521Lbmi2: # @mcl_fpDbl_mod_NIST_P521Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %ecx
+ movl 124(%ecx), %edx
+ movl 128(%ecx), %esi
+ movl %esi, %eax
+ shldl $23, %edx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 120(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 116(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 112(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 108(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 104(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 100(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 92(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 84(%ecx), %edi
+ shldl $23, %edi, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 80(%ecx), %edx
+ shldl $23, %edx, %edi
+ movl 76(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl 72(%ecx), %ebx
+ shldl $23, %ebx, %eax
+ movl 68(%ecx), %ebp
+ shldl $23, %ebp, %ebx
+ shrl $9, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 64(%ecx), %esi
+ shldl $23, %esi, %ebp
+ andl $511, %esi # imm = 0x1FF
+ addl (%ecx), %ebp
+ adcl 4(%ecx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ adcl 8(%ecx), %eax
+ adcl 12(%ecx), %edx
+ adcl 16(%ecx), %edi
+ movl 28(%esp), %ebx # 4-byte Reload
+ adcl 20(%ecx), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 24(%ecx), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ adcl 28(%ecx), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl 32(%ecx), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 36(%ecx), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ebx # 4-byte Reload
+ adcl 40(%ecx), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 24(%esp), %ebx # 4-byte Reload
+ adcl 44(%ecx), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 48(%ecx), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 20(%esp), %ebx # 4-byte Reload
+ adcl 52(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 56(%ecx), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl 60(%ecx), %ebx
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ shrl $9, %ecx
+ andl $1, %ecx
+ addl %ebp, %ecx
+ adcl $0, 16(%esp) # 4-byte Folded Spill
+ adcl $0, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ebx, %ebp
+ adcl $0, %ebp
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ movl %ecx, %edi
+ andl %eax, %edi
+ andl %edx, %edi
+ andl %esi, %edi
+ andl 28(%esp), %edi # 4-byte Folded Reload
+ andl 32(%esp), %edi # 4-byte Folded Reload
+ andl 36(%esp), %edi # 4-byte Folded Reload
+ andl 40(%esp), %edi # 4-byte Folded Reload
+ andl 44(%esp), %edi # 4-byte Folded Reload
+ andl 48(%esp), %edi # 4-byte Folded Reload
+ andl 24(%esp), %edi # 4-byte Folded Reload
+ andl 52(%esp), %edi # 4-byte Folded Reload
+ movl 20(%esp), %esi # 4-byte Reload
+ andl %esi, %edi
+ andl 56(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, %edx
+ movl 16(%esp), %ebx # 4-byte Reload
+ andl %ebp, %edi
+ movl %ebp, %eax
+ movl %edx, %ebp
+ orl $-512, %ebp # imm = 0xFFFFFFFFFFFFFE00
+ andl %edi, %ebp
+ andl %ebx, %ebp
+ cmpl $-1, %ebp
+ movl 80(%esp), %edi
+ je .LBB4_1
+# BB#3: # %nonzero
+ movl %ecx, (%edi)
+ movl %ebx, 4(%edi)
+ movl (%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%edi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%edi)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%edi)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%edi)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%edi)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%edi)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%edi)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%edi)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%edi)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%edi)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%edi)
+ movl %esi, 52(%edi)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%edi)
+ movl %eax, 60(%edi)
+ andl $511, %edx # imm = 0x1FF
+ movl %edx, 64(%edi)
+ jmp .LBB4_2
+.LBB4_1: # %zero
+ xorl %eax, %eax
+ movl $17, %ecx
+ rep;stosl
+.LBB4_2: # %zero
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end4:
+ .size mcl_fpDbl_mod_NIST_P521Lbmi2, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521Lbmi2
+
+ .globl mcl_fp_mulUnitPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre1Lbmi2,@function
+mcl_fp_mulUnitPre1Lbmi2: # @mcl_fp_mulUnitPre1Lbmi2
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %edx
+ mulxl 12(%esp), %ecx, %eax
+ movl 4(%esp), %edx
+ movl %ecx, (%edx)
+ movl %eax, 4(%edx)
+ retl
+.Lfunc_end5:
+ .size mcl_fp_mulUnitPre1Lbmi2, .Lfunc_end5-mcl_fp_mulUnitPre1Lbmi2
+
+ .globl mcl_fpDbl_mulPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre1Lbmi2,@function
+mcl_fpDbl_mulPre1Lbmi2: # @mcl_fpDbl_mulPre1Lbmi2
+# BB#0:
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 8(%esp), %eax
+ mulxl (%eax), %ecx, %eax
+ movl 4(%esp), %edx
+ movl %ecx, (%edx)
+ movl %eax, 4(%edx)
+ retl
+.Lfunc_end6:
+ .size mcl_fpDbl_mulPre1Lbmi2, .Lfunc_end6-mcl_fpDbl_mulPre1Lbmi2
+
+ .globl mcl_fpDbl_sqrPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre1Lbmi2,@function
+mcl_fpDbl_sqrPre1Lbmi2: # @mcl_fpDbl_sqrPre1Lbmi2
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %edx
+ mulxl %edx, %ecx, %eax
+ movl 4(%esp), %edx
+ movl %ecx, (%edx)
+ movl %eax, 4(%edx)
+ retl
+.Lfunc_end7:
+ .size mcl_fpDbl_sqrPre1Lbmi2, .Lfunc_end7-mcl_fpDbl_sqrPre1Lbmi2
+
+ .globl mcl_fp_mont1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont1Lbmi2,@function
+mcl_fp_mont1Lbmi2: # @mcl_fp_mont1Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %edx
+ movl 20(%esp), %eax
+ mulxl (%eax), %esi, %ecx
+ movl 24(%esp), %eax
+ movl -4(%eax), %edx
+ imull %esi, %edx
+ movl (%eax), %edi
+ mulxl %edi, %edx, %eax
+ addl %esi, %edx
+ adcl %ecx, %eax
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl %eax, %ecx
+ subl %edi, %ecx
+ sbbl $0, %edx
+ testb $1, %dl
+ jne .LBB8_2
+# BB#1:
+ movl %ecx, %eax
+.LBB8_2:
+ movl 12(%esp), %ecx
+ movl %eax, (%ecx)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end8:
+ .size mcl_fp_mont1Lbmi2, .Lfunc_end8-mcl_fp_mont1Lbmi2
+
+ .globl mcl_fp_montNF1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF1Lbmi2,@function
+mcl_fp_montNF1Lbmi2: # @mcl_fp_montNF1Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %edx
+ movl 20(%esp), %eax
+ mulxl (%eax), %esi, %ecx
+ movl 24(%esp), %eax
+ movl -4(%eax), %edx
+ imull %esi, %edx
+ movl (%eax), %edi
+ mulxl %edi, %edx, %eax
+ addl %esi, %edx
+ adcl %ecx, %eax
+ movl %eax, %ecx
+ subl %edi, %ecx
+ js .LBB9_2
+# BB#1:
+ movl %ecx, %eax
+.LBB9_2:
+ movl 12(%esp), %ecx
+ movl %eax, (%ecx)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end9:
+ .size mcl_fp_montNF1Lbmi2, .Lfunc_end9-mcl_fp_montNF1Lbmi2
+
+ .globl mcl_fp_montRed1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed1Lbmi2,@function
+mcl_fp_montRed1Lbmi2: # @mcl_fp_montRed1Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %esi
+ movl 20(%esp), %eax
+ movl -4(%eax), %edx
+ imull %esi, %edx
+ movl (%eax), %edi
+ mulxl %edi, %edx, %eax
+ addl %esi, %edx
+ adcl 4(%ecx), %eax
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl %eax, %ecx
+ subl %edi, %ecx
+ sbbl $0, %edx
+ testb $1, %dl
+ jne .LBB10_2
+# BB#1:
+ movl %ecx, %eax
+.LBB10_2:
+ movl 12(%esp), %ecx
+ movl %eax, (%ecx)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end10:
+ .size mcl_fp_montRed1Lbmi2, .Lfunc_end10-mcl_fp_montRed1Lbmi2
+
+ .globl mcl_fp_addPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre1Lbmi2,@function
+mcl_fp_addPre1Lbmi2: # @mcl_fp_addPre1Lbmi2
+# BB#0:
+ movl 12(%esp), %eax
+ movl (%eax), %eax
+ movl 4(%esp), %ecx
+ movl 8(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, (%ecx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ retl
+.Lfunc_end11:
+ .size mcl_fp_addPre1Lbmi2, .Lfunc_end11-mcl_fp_addPre1Lbmi2
+
+ .globl mcl_fp_subPre1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre1Lbmi2,@function
+mcl_fp_subPre1Lbmi2: # @mcl_fp_subPre1Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ xorl %eax, %eax
+ movl 8(%esp), %edx
+ movl 16(%esp), %esi
+ subl (%esi), %ecx
+ movl %ecx, (%edx)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end12:
+ .size mcl_fp_subPre1Lbmi2, .Lfunc_end12-mcl_fp_subPre1Lbmi2
+
+ .globl mcl_fp_shr1_1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_1Lbmi2,@function
+mcl_fp_shr1_1Lbmi2: # @mcl_fp_shr1_1Lbmi2
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %eax
+ shrl %eax
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ retl
+.Lfunc_end13:
+ .size mcl_fp_shr1_1Lbmi2, .Lfunc_end13-mcl_fp_shr1_1Lbmi2
+
+ .globl mcl_fp_add1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add1Lbmi2,@function
+mcl_fp_add1Lbmi2: # @mcl_fp_add1Lbmi2
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %eax
+ movl 8(%esp), %ecx
+ movl 12(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, (%ecx)
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 20(%esp), %esi
+ subl (%esi), %eax
+ sbbl $0, %edx
+ testb $1, %dl
+ jne .LBB14_2
+# BB#1: # %nocarry
+ movl %eax, (%ecx)
+.LBB14_2: # %carry
+ popl %esi
+ retl
+.Lfunc_end14:
+ .size mcl_fp_add1Lbmi2, .Lfunc_end14-mcl_fp_add1Lbmi2
+
+ .globl mcl_fp_addNF1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF1Lbmi2,@function
+mcl_fp_addNF1Lbmi2: # @mcl_fp_addNF1Lbmi2
+# BB#0:
+ movl 12(%esp), %eax
+ movl (%eax), %eax
+ movl 8(%esp), %ecx
+ addl (%ecx), %eax
+ movl 16(%esp), %edx
+ movl %eax, %ecx
+ subl (%edx), %ecx
+ js .LBB15_2
+# BB#1:
+ movl %ecx, %eax
+.LBB15_2:
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ retl
+.Lfunc_end15:
+ .size mcl_fp_addNF1Lbmi2, .Lfunc_end15-mcl_fp_addNF1Lbmi2
+
+ .globl mcl_fp_sub1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub1Lbmi2,@function
+mcl_fp_sub1Lbmi2: # @mcl_fp_sub1Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %eax
+ xorl %edx, %edx
+ movl 8(%esp), %ecx
+ movl 16(%esp), %esi
+ subl (%esi), %eax
+ movl %eax, (%ecx)
+ sbbl $0, %edx
+ testb $1, %dl
+ jne .LBB16_2
+# BB#1: # %nocarry
+ popl %esi
+ retl
+.LBB16_2: # %carry
+ movl 20(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, (%ecx)
+ popl %esi
+ retl
+.Lfunc_end16:
+ .size mcl_fp_sub1Lbmi2, .Lfunc_end16-mcl_fp_sub1Lbmi2
+
+ .globl mcl_fp_subNF1Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF1Lbmi2,@function
+mcl_fp_subNF1Lbmi2: # @mcl_fp_subNF1Lbmi2
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %eax
+ movl 12(%esp), %ecx
+ subl (%ecx), %eax
+ movl %eax, %ecx
+ sarl $31, %ecx
+ movl 16(%esp), %edx
+ andl (%edx), %ecx
+ addl %eax, %ecx
+ movl 4(%esp), %eax
+ movl %ecx, (%eax)
+ retl
+.Lfunc_end17:
+ .size mcl_fp_subNF1Lbmi2, .Lfunc_end17-mcl_fp_subNF1Lbmi2
+
+ .globl mcl_fpDbl_add1Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add1Lbmi2,@function
+mcl_fpDbl_add1Lbmi2: # @mcl_fpDbl_add1Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %esi
+ movl 20(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %eax
+ movl 16(%esp), %esi
+ addl (%esi), %edx
+ movl 12(%esp), %ecx
+ adcl 4(%esi), %eax
+ movl %edx, (%ecx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 24(%esp), %esi
+ movl %eax, %edx
+ subl (%esi), %edx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB18_2
+# BB#1:
+ movl %edx, %eax
+.LBB18_2:
+ movl %eax, 4(%ecx)
+ popl %esi
+ popl %ebx
+ retl
+.Lfunc_end18:
+ .size mcl_fpDbl_add1Lbmi2, .Lfunc_end18-mcl_fpDbl_add1Lbmi2
+
+ .globl mcl_fpDbl_sub1Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub1Lbmi2,@function
+mcl_fpDbl_sub1Lbmi2: # @mcl_fpDbl_sub1Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %eax
+ xorl %ecx, %ecx
+ movl 16(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %eax
+ movl 8(%esp), %edx
+ movl %esi, (%edx)
+ sbbl $0, %ecx
+ andl $1, %ecx
+ je .LBB19_2
+# BB#1:
+ movl 20(%esp), %ecx
+ movl (%ecx), %ecx
+.LBB19_2:
+ addl %eax, %ecx
+ movl %ecx, 4(%edx)
+ popl %esi
+ retl
+.Lfunc_end19:
+ .size mcl_fpDbl_sub1Lbmi2, .Lfunc_end19-mcl_fpDbl_sub1Lbmi2
+
+ .globl mcl_fp_mulUnitPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre2Lbmi2,@function
+mcl_fp_mulUnitPre2Lbmi2: # @mcl_fp_mulUnitPre2Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %edx
+ movl 16(%esp), %eax
+ mulxl 4(%eax), %ecx, %esi
+ mulxl (%eax), %eax, %edx
+ movl 12(%esp), %edi
+ movl %eax, (%edi)
+ addl %ecx, %edx
+ movl %edx, 4(%edi)
+ adcl $0, %esi
+ movl %esi, 8(%edi)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end20:
+ .size mcl_fp_mulUnitPre2Lbmi2, .Lfunc_end20-mcl_fp_mulUnitPre2Lbmi2
+
+ .globl mcl_fpDbl_mulPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre2Lbmi2,@function
+mcl_fpDbl_mulPre2Lbmi2: # @mcl_fpDbl_mulPre2Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %ecx
+ movl 28(%esp), %esi
+ movl (%esi), %edi
+ movl %ecx, %edx
+ mulxl %edi, %ebx, %ebp
+ movl %eax, %edx
+ mulxl %edi, %edx, %edi
+ addl %ebx, %edi
+ movl 20(%esp), %ebx
+ movl %edx, (%ebx)
+ adcl $0, %ebp
+ movl 4(%esi), %esi
+ movl %eax, %edx
+ mulxl %esi, %eax, %ebx
+ addl %edi, %eax
+ movl %ecx, %edx
+ mulxl %esi, %edx, %ecx
+ adcl %ebp, %edx
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl %ebx, %edx
+ movl 20(%esp), %edi
+ movl %eax, 4(%edi)
+ movl %edx, 8(%edi)
+ adcl %ecx, %esi
+ movl %esi, 12(%edi)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end21:
+ .size mcl_fpDbl_mulPre2Lbmi2, .Lfunc_end21-mcl_fpDbl_mulPre2Lbmi2
+
+ .globl mcl_fpDbl_sqrPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre2Lbmi2,@function
+mcl_fpDbl_sqrPre2Lbmi2: # @mcl_fpDbl_sqrPre2Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %ecx
+ movl 16(%esp), %esi
+ movl %eax, %edx
+ mulxl %eax, %edx, %edi
+ movl %edx, (%esi)
+ movl %ecx, %edx
+ mulxl %eax, %edx, %eax
+ addl %edx, %edi
+ movl %eax, %ebx
+ adcl $0, %ebx
+ addl %edx, %edi
+ movl %ecx, %edx
+ mulxl %ecx, %edx, %ecx
+ adcl %ebx, %edx
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ addl %eax, %edx
+ movl %edi, 4(%esi)
+ movl %edx, 8(%esi)
+ adcl %ecx, %ebx
+ movl %ebx, 12(%esi)
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end22:
+ .size mcl_fpDbl_sqrPre2Lbmi2, .Lfunc_end22-mcl_fpDbl_sqrPre2Lbmi2
+
+ .globl mcl_fp_mont2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont2Lbmi2,@function
+mcl_fp_mont2Lbmi2: # @mcl_fp_mont2Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 48(%esp), %eax
+ movl (%eax), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 52(%esp), %eax
+ movl (%eax), %eax
+ mulxl %eax, %ecx, %esi
+ movl %edi, %edx
+ mulxl %eax, %edx, %edi
+ movl %edx, (%esp) # 4-byte Spill
+ addl %ecx, %edi
+ adcl $0, %esi
+ movl 56(%esp), %eax
+ movl -4(%eax), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ imull %ecx, %edx
+ movl (%eax), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 4(%eax), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ mulxl %eax, %ebp, %ecx
+ mulxl %ebx, %edx, %eax
+ addl %ebp, %eax
+ adcl $0, %ecx
+ addl (%esp), %edx # 4-byte Folded Reload
+ adcl %edi, %eax
+ adcl %esi, %ecx
+ movl 52(%esp), %edx
+ movl 4(%edx), %edx
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ mulxl 4(%esp), %esi, %ebp # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ mulxl 8(%esp), %edi, %esi # 4-byte Folded Reload
+ addl 4(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl %eax, %edi
+ adcl %ecx, %esi
+ adcl %ebx, %ebp
+ sbbl %ecx, %ecx
+ movl 12(%esp), %edx # 4-byte Reload
+ imull %edi, %edx
+ movl %edx, %eax
+ mulxl 16(%esp), %ebx, %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, %edx
+ mulxl 20(%esp), %edx, %eax # 4-byte Folded Reload
+ addl 12(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ andl $1, %ecx
+ addl %edi, %ebx
+ adcl %esi, %edx
+ adcl %ebp, %eax
+ adcl $0, %ecx
+ movl %edx, %ebp
+ subl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, %esi
+ sbbl 20(%esp), %esi # 4-byte Folded Reload
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB23_2
+# BB#1:
+ movl %ebp, %edx
+.LBB23_2:
+ movl 44(%esp), %edi
+ movl %edx, (%edi)
+ testb %cl, %cl
+ jne .LBB23_4
+# BB#3:
+ movl %esi, %eax
+.LBB23_4:
+ movl %eax, 4(%edi)
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end23:
+ .size mcl_fp_mont2Lbmi2, .Lfunc_end23-mcl_fp_mont2Lbmi2
+
+ .globl mcl_fp_montNF2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF2Lbmi2,@function
+mcl_fp_montNF2Lbmi2: # @mcl_fp_montNF2Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 44(%esp), %eax
+ movl (%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %eax
+ movl (%eax), %eax
+ mulxl %eax, %edi, %ebp
+ movl %ecx, %edx
+ mulxl %eax, %ecx, %esi
+ addl %edi, %esi
+ adcl $0, %ebp
+ movl 52(%esp), %eax
+ movl -4(%eax), %ebx
+ movl %ecx, %edx
+ imull %ebx, %edx
+ movl (%eax), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ mulxl %eax, %edi, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ addl %ecx, %edi
+ movl 52(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ mulxl %eax, %edi, %edx
+ adcl %esi, %edi
+ adcl $0, %ebp
+ addl (%esp), %edi # 4-byte Folded Reload
+ adcl %edx, %ebp
+ movl 48(%esp), %eax
+ movl 4(%eax), %edx
+ mulxl 4(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ mulxl 8(%esp), %eax, %ecx # 4-byte Folded Reload
+ addl 4(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %edi, %eax
+ adcl %ebp, %ecx
+ adcl $0, %esi
+ imull %eax, %ebx
+ movl %ebx, %edx
+ movl 16(%esp), %ebp # 4-byte Reload
+ mulxl %ebp, %edx, %edi
+ addl %eax, %edx
+ movl %ebx, %edx
+ movl 12(%esp), %ebx # 4-byte Reload
+ mulxl %ebx, %eax, %edx
+ adcl %ecx, %eax
+ adcl $0, %esi
+ addl %edi, %eax
+ adcl %edx, %esi
+ movl %eax, %edx
+ subl %ebp, %edx
+ movl %esi, %ecx
+ sbbl %ebx, %ecx
+ testl %ecx, %ecx
+ js .LBB24_2
+# BB#1:
+ movl %edx, %eax
+.LBB24_2:
+ movl 40(%esp), %edx
+ movl %eax, (%edx)
+ js .LBB24_4
+# BB#3:
+ movl %ecx, %esi
+.LBB24_4:
+ movl %esi, 4(%edx)
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end24:
+ .size mcl_fp_montNF2Lbmi2, .Lfunc_end24-mcl_fp_montNF2Lbmi2
+
+ .globl mcl_fp_montRed2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed2Lbmi2,@function
+mcl_fp_montRed2Lbmi2: # @mcl_fp_montRed2Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 44(%esp), %esi
+ movl -4(%esi), %ecx
+ movl (%esi), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 40(%esp), %eax
+ movl (%eax), %ebx
+ movl %ebx, %edx
+ imull %ecx, %edx
+ movl 4(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ mulxl %eax, %ebp, %esi
+ mulxl %edi, %edx, %eax
+ addl %ebp, %eax
+ adcl $0, %esi
+ addl %ebx, %edx
+ movl 40(%esp), %edi
+ movl 12(%edi), %edx
+ adcl 4(%edi), %eax
+ adcl 8(%edi), %esi
+ adcl $0, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ imull %eax, %ecx
+ movl %ecx, %edx
+ mulxl 8(%esp), %edi, %edx # 4-byte Folded Reload
+ movl %edx, (%esp) # 4-byte Spill
+ movl %ecx, %edx
+ mulxl 12(%esp), %edx, %ebp # 4-byte Folded Reload
+ addl (%esp), %edx # 4-byte Folded Reload
+ adcl $0, %ebp
+ andl $1, %ebx
+ addl %eax, %edi
+ adcl %esi, %edx
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl %edx, %edi
+ subl 8(%esp), %edi # 4-byte Folded Reload
+ movl %ebp, %ecx
+ sbbl 12(%esp), %ecx # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB25_2
+# BB#1:
+ movl %edi, %edx
+.LBB25_2:
+ movl 36(%esp), %esi
+ movl %edx, (%esi)
+ testb %bl, %bl
+ jne .LBB25_4
+# BB#3:
+ movl %ecx, %ebp
+.LBB25_4:
+ movl %ebp, 4(%esi)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end25:
+ .size mcl_fp_montRed2Lbmi2, .Lfunc_end25-mcl_fp_montRed2Lbmi2
+
+ .globl mcl_fp_addPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre2Lbmi2,@function
+mcl_fp_addPre2Lbmi2: # @mcl_fp_addPre2Lbmi2
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ movl 12(%esp), %edx
+ addl (%edx), %ecx
+ movl 8(%esp), %esi
+ adcl 4(%edx), %eax
+ movl %ecx, (%esi)
+ movl %eax, 4(%esi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end26:
+ .size mcl_fp_addPre2Lbmi2, .Lfunc_end26-mcl_fp_addPre2Lbmi2
+
+ .globl mcl_fp_subPre2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre2Lbmi2,@function
+mcl_fp_subPre2Lbmi2: # @mcl_fp_subPre2Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ xorl %eax, %eax
+ movl 16(%esp), %esi
+ subl (%esi), %ecx
+ sbbl 4(%esi), %edx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl %edx, 4(%esi)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end27:
+ .size mcl_fp_subPre2Lbmi2, .Lfunc_end27-mcl_fp_subPre2Lbmi2
+
+ .globl mcl_fp_shr1_2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_2Lbmi2,@function
+mcl_fp_shr1_2Lbmi2: # @mcl_fp_shr1_2Lbmi2
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %ecx
+ movl 4(%esp), %edx
+ movl %ecx, (%edx)
+ shrl %eax
+ movl %eax, 4(%edx)
+ retl
+.Lfunc_end28:
+ .size mcl_fp_shr1_2Lbmi2, .Lfunc_end28-mcl_fp_shr1_2Lbmi2
+
+ .globl mcl_fp_add2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add2Lbmi2,@function
+mcl_fp_add2Lbmi2: # @mcl_fp_add2Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %esi
+ movl 20(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %ecx
+ movl 16(%esp), %esi
+ addl (%esi), %eax
+ movl 12(%esp), %edx
+ adcl 4(%esi), %ecx
+ movl %eax, (%edx)
+ movl %ecx, 4(%edx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 24(%esp), %esi
+ subl (%esi), %eax
+ sbbl 4(%esi), %ecx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB29_2
+# BB#1: # %nocarry
+ movl %eax, (%edx)
+ movl %ecx, 4(%edx)
+.LBB29_2: # %carry
+ popl %esi
+ popl %ebx
+ retl
+.Lfunc_end29:
+ .size mcl_fp_add2Lbmi2, .Lfunc_end29-mcl_fp_add2Lbmi2
+
+ .globl mcl_fp_addNF2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF2Lbmi2,@function
+mcl_fp_addNF2Lbmi2: # @mcl_fp_addNF2Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ movl 16(%esp), %edx
+ addl (%edx), %ecx
+ adcl 4(%edx), %eax
+ movl 24(%esp), %edi
+ movl %ecx, %esi
+ subl (%edi), %esi
+ movl %eax, %edx
+ sbbl 4(%edi), %edx
+ testl %edx, %edx
+ js .LBB30_2
+# BB#1:
+ movl %esi, %ecx
+.LBB30_2:
+ movl 12(%esp), %esi
+ movl %ecx, (%esi)
+ js .LBB30_4
+# BB#3:
+ movl %edx, %eax
+.LBB30_4:
+ movl %eax, 4(%esi)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end30:
+ .size mcl_fp_addNF2Lbmi2, .Lfunc_end30-mcl_fp_addNF2Lbmi2
+
+ .globl mcl_fp_sub2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub2Lbmi2,@function
+mcl_fp_sub2Lbmi2: # @mcl_fp_sub2Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ xorl %ebx, %ebx
+ movl 24(%esp), %edx
+ subl (%edx), %ecx
+ sbbl 4(%edx), %eax
+ movl 16(%esp), %edx
+ movl %ecx, (%edx)
+ movl %eax, 4(%edx)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB31_2
+# BB#1: # %carry
+ movl 28(%esp), %esi
+ movl 4(%esi), %edi
+ addl (%esi), %ecx
+ movl %ecx, (%edx)
+ adcl %eax, %edi
+ movl %edi, 4(%edx)
+.LBB31_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end31:
+ .size mcl_fp_sub2Lbmi2, .Lfunc_end31-mcl_fp_sub2Lbmi2
+
+ .globl mcl_fp_subNF2Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF2Lbmi2,@function
+mcl_fp_subNF2Lbmi2: # @mcl_fp_subNF2Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ movl 20(%esp), %edx
+ subl (%edx), %ecx
+ sbbl 4(%edx), %eax
+ movl %eax, %edx
+ sarl $31, %edx
+ movl 24(%esp), %esi
+ movl 4(%esi), %edi
+ andl %edx, %edi
+ andl (%esi), %edx
+ addl %ecx, %edx
+ movl 12(%esp), %ecx
+ movl %edx, (%ecx)
+ adcl %eax, %edi
+ movl %edi, 4(%ecx)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end32:
+ .size mcl_fp_subNF2Lbmi2, .Lfunc_end32-mcl_fp_subNF2Lbmi2
+
+ .globl mcl_fpDbl_add2Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add2Lbmi2,@function
+mcl_fpDbl_add2Lbmi2: # @mcl_fpDbl_add2Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %edx
+ movl 12(%edx), %esi
+ movl 24(%esp), %edi
+ movl 12(%edi), %eax
+ movl 8(%edx), %ecx
+ movl (%edx), %ebx
+ movl 4(%edx), %ebp
+ addl (%edi), %ebx
+ adcl 4(%edi), %ebp
+ movl 20(%esp), %edx
+ adcl 8(%edi), %ecx
+ movl %ebx, (%edx)
+ movl %ebp, 4(%edx)
+ adcl %esi, %eax
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 32(%esp), %ebp
+ movl %ecx, %esi
+ subl (%ebp), %esi
+ movl %eax, %edi
+ sbbl 4(%ebp), %edi
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB33_2
+# BB#1:
+ movl %edi, %eax
+.LBB33_2:
+ testb %bl, %bl
+ jne .LBB33_4
+# BB#3:
+ movl %esi, %ecx
+.LBB33_4:
+ movl %ecx, 8(%edx)
+ movl %eax, 12(%edx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end33:
+ .size mcl_fpDbl_add2Lbmi2, .Lfunc_end33-mcl_fpDbl_add2Lbmi2
+
+ .globl mcl_fpDbl_sub2Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub2Lbmi2,@function
+mcl_fpDbl_sub2Lbmi2: # @mcl_fpDbl_sub2Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %ebx, %ebx
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %eax
+ sbbl 8(%edx), %eax
+ movl 12(%edx), %ebp
+ movl 12(%ecx), %edx
+ movl 20(%esp), %ecx
+ movl %esi, (%ecx)
+ movl %edi, 4(%ecx)
+ sbbl %ebp, %edx
+ movl 32(%esp), %edi
+ movl (%edi), %esi
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB34_1
+# BB#2:
+ xorl %edi, %edi
+ jmp .LBB34_3
+.LBB34_1:
+ movl 4(%edi), %edi
+.LBB34_3:
+ testb %bl, %bl
+ jne .LBB34_5
+# BB#4:
+ xorl %esi, %esi
+.LBB34_5:
+ addl %eax, %esi
+ movl %esi, 8(%ecx)
+ adcl %edx, %edi
+ movl %edi, 12(%ecx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end34:
+ .size mcl_fpDbl_sub2Lbmi2, .Lfunc_end34-mcl_fpDbl_sub2Lbmi2
+
+ .globl mcl_fp_mulUnitPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre3Lbmi2,@function
+mcl_fp_mulUnitPre3Lbmi2: # @mcl_fp_mulUnitPre3Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %edx
+ movl 20(%esp), %eax
+ mulxl 4(%eax), %esi, %ecx
+ mulxl (%eax), %edi, %ebx
+ addl %esi, %ebx
+ mulxl 8(%eax), %eax, %edx
+ movl 16(%esp), %esi
+ movl %edi, (%esi)
+ movl %ebx, 4(%esi)
+ adcl %ecx, %eax
+ movl %eax, 8(%esi)
+ adcl $0, %edx
+ movl %edx, 12(%esi)
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end35:
+ .size mcl_fp_mulUnitPre3Lbmi2, .Lfunc_end35-mcl_fp_mulUnitPre3Lbmi2
+
+ .globl mcl_fpDbl_mulPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre3Lbmi2,@function
+mcl_fpDbl_mulPre3Lbmi2: # @mcl_fpDbl_mulPre3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 40(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %esi
+ movl (%esi), %edi
+ mulxl %edi, %ebx, %ebp
+ movl %eax, %edx
+ movl %eax, %esi
+ mulxl %edi, %edx, %eax
+ movl %edx, 4(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ movl 8(%ecx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ mulxl %edi, %ebx, %edi
+ adcl %ebp, %ebx
+ movl 36(%esp), %ecx
+ movl 4(%esp), %edx # 4-byte Reload
+ movl %edx, (%ecx)
+ adcl $0, %edi
+ movl 44(%esp), %ecx
+ movl 4(%ecx), %ebp
+ movl %esi, %edx
+ mulxl %ebp, %ecx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ addl %eax, %ecx
+ movl 12(%esp), %edx # 4-byte Reload
+ mulxl %ebp, %eax, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl %ebx, %eax
+ movl 8(%esp), %edx # 4-byte Reload
+ mulxl %ebp, %ebx, %edx
+ adcl %edi, %ebx
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl 4(%esp), %eax # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ adcl %edx, %edi
+ movl 36(%esp), %edx
+ movl %ecx, 4(%edx)
+ movl 44(%esp), %ecx
+ movl 8(%ecx), %ecx
+ movl %esi, %edx
+ mulxl %ecx, %ebp, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ addl %eax, %ebp
+ movl 12(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %eax, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl %ebx, %eax
+ movl 8(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %edx, %ecx
+ adcl %edi, %edx
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl 4(%esp), %eax # 4-byte Folded Reload
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl 36(%esp), %edi
+ movl %ebp, 8(%edi)
+ movl %eax, 12(%edi)
+ movl %edx, 16(%edi)
+ adcl %ecx, %esi
+ movl %esi, 20(%edi)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end36:
+ .size mcl_fpDbl_mulPre3Lbmi2, .Lfunc_end36-mcl_fpDbl_mulPre3Lbmi2
+
+ .globl mcl_fpDbl_sqrPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre3Lbmi2,@function
+mcl_fpDbl_sqrPre3Lbmi2: # @mcl_fpDbl_sqrPre3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 44(%esp), %edx
+ movl 8(%edx), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl 4(%edx), %esi
+ movl 40(%esp), %eax
+ movl %ecx, %edx
+ mulxl %ecx, %edx, %ebx
+ movl %edx, (%eax)
+ movl %esi, %edx
+ mulxl %ecx, %ebp, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ addl %ebp, %ebx
+ movl %edi, %edx
+ mulxl %ecx, %edx, %ecx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl %edx, %edi
+ adcl $0, %ecx
+ addl %ebp, %ebx
+ movl %esi, %edx
+ mulxl %esi, %ebp, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, %edx
+ mulxl %esi, %edx, %esi
+ adcl %edx, %ecx
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ adcl %esi, %edi
+ addl 12(%esp), %ebp # 4-byte Folded Reload
+ adcl %edx, %ecx
+ movl %eax, %edx
+ mulxl %eax, %edx, %eax
+ adcl %edi, %edx
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl 16(%esp), %ecx # 4-byte Folded Reload
+ adcl %esi, %edx
+ movl 40(%esp), %esi
+ movl %ebx, 4(%esi)
+ movl %ebp, 8(%esi)
+ movl %ecx, 12(%esi)
+ movl %edx, 16(%esi)
+ adcl %eax, %edi
+ movl %edi, 20(%esi)
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end37:
+ .size mcl_fpDbl_sqrPre3Lbmi2, .Lfunc_end37-mcl_fpDbl_sqrPre3Lbmi2
+
+ .globl mcl_fp_mont3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont3Lbmi2,@function
+mcl_fp_mont3Lbmi2: # @mcl_fp_mont3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl 68(%esp), %eax
+ movl 8(%eax), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx
+ movl (%ecx), %ecx
+ mulxl %ecx, %edx, %edi
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl (%eax), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ mulxl %ecx, %eax, %ebp
+ movl %esi, %edx
+ mulxl %ecx, %edx, %ebx
+ movl %edx, 4(%esp) # 4-byte Spill
+ addl %eax, %ebx
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 24(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 76(%esp), %esi
+ movl -4(%esi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ imull %eax, %edx
+ movl (%esi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 4(%esi), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ mulxl %ecx, %ecx, %edi
+ movl %edi, (%esp) # 4-byte Spill
+ mulxl %eax, %ebp, %edi
+ addl %ecx, %edi
+ movl 8(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %esi
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %edi
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl 4(%eax), %edx
+ mulxl 16(%esp), %ebx, %eax # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ mulxl 8(%esp), %ebx, %eax # 4-byte Folded Reload
+ movl %ebx, (%esp) # 4-byte Spill
+ mulxl 12(%esp), %ebx, %ebp # 4-byte Folded Reload
+ addl (%esp), %ebp # 4-byte Folded Reload
+ movl %eax, %edx
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edi, %ebx
+ adcl %ecx, %ebp
+ adcl %esi, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl %eax, %edx
+ imull 20(%esp), %edx # 4-byte Folded Reload
+ mulxl 40(%esp), %ecx, %esi # 4-byte Folded Reload
+ movl %esi, (%esp) # 4-byte Spill
+ mulxl 36(%esp), %esi, %ebx # 4-byte Folded Reload
+ addl %ecx, %ebx
+ mulxl 32(%esp), %ecx, %edi # 4-byte Folded Reload
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %edi
+ movl 24(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ addl %eax, %esi
+ adcl %ebp, %ebx
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %edx
+ movl 8(%edx), %edx
+ mulxl 16(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ mulxl 8(%esp), %eax, %ebp # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ mulxl 12(%esp), %eax, %esi # 4-byte Folded Reload
+ addl 8(%esp), %esi # 4-byte Folded Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ebx, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl %ecx, %esi
+ adcl %edi, %ebp
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ movl 20(%esp), %edx # 4-byte Reload
+ imull %eax, %edx
+ mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edx, %eax
+ mulxl 40(%esp), %edi, %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl 20(%esp), %edi # 4-byte Folded Reload
+ movl %eax, %edx
+ mulxl 32(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ andl $1, %ebx
+ addl 16(%esp), %ecx # 4-byte Folded Reload
+ adcl %esi, %edi
+ adcl %ebp, %edx
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl %edi, %ebp
+ subl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, %esi
+ sbbl 40(%esp), %esi # 4-byte Folded Reload
+ movl %eax, %ecx
+ sbbl 32(%esp), %ecx # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB38_2
+# BB#1:
+ movl %ebp, %edi
+.LBB38_2:
+ movl 64(%esp), %ebp
+ movl %edi, (%ebp)
+ testb %bl, %bl
+ jne .LBB38_4
+# BB#3:
+ movl %esi, %edx
+.LBB38_4:
+ movl %edx, 4(%ebp)
+ jne .LBB38_6
+# BB#5:
+ movl %ecx, %eax
+.LBB38_6:
+ movl %eax, 8(%ebp)
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end38:
+ .size mcl_fp_mont3Lbmi2, .Lfunc_end38-mcl_fp_mont3Lbmi2
+
+ .globl mcl_fp_montNF3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF3Lbmi2,@function
+mcl_fp_montNF3Lbmi2: # @mcl_fp_montNF3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 60(%esp), %eax
+ movl (%eax), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx
+ movl (%ecx), %ecx
+ mulxl %ecx, %esi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl %ecx, %edi, %ebp
+ addl %esi, %ebp
+ movl 8(%eax), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ mulxl %ecx, %eax, %ebx
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl 68(%esp), %esi
+ movl -4(%esi), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl %edi, %edx
+ imull %ecx, %edx
+ movl (%esi), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ mulxl %ecx, %esi, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ addl %edi, %esi
+ movl 68(%esp), %esi
+ movl 4(%esi), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ mulxl %ecx, %edi, %ecx
+ adcl %ebp, %edi
+ movl 8(%esi), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ mulxl %esi, %ebp, %edx
+ adcl %eax, %ebp
+ adcl $0, %ebx
+ addl 4(%esp), %edi # 4-byte Folded Reload
+ adcl %ecx, %ebp
+ adcl %edx, %ebx
+ movl 64(%esp), %eax
+ movl 4(%eax), %edx
+ mulxl 12(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ mulxl 16(%esp), %esi, %ecx # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ addl %eax, %ecx
+ mulxl 8(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl $0, %eax
+ movl 4(%esp), %edx # 4-byte Reload
+ addl %edi, %edx
+ adcl %ebp, %ecx
+ adcl %ebx, %esi
+ adcl $0, %eax
+ movl %edx, %ebp
+ imull 20(%esp), %edx # 4-byte Folded Reload
+ mulxl 32(%esp), %ebx, %edi # 4-byte Folded Reload
+ addl %ebp, %ebx
+ mulxl 28(%esp), %ebp, %ebx # 4-byte Folded Reload
+ adcl %ecx, %ebp
+ mulxl 24(%esp), %ecx, %edx # 4-byte Folded Reload
+ adcl %esi, %ecx
+ adcl $0, %eax
+ addl %edi, %ebp
+ adcl %ebx, %ecx
+ adcl %edx, %eax
+ movl 64(%esp), %edx
+ movl 8(%edx), %edx
+ mulxl 12(%esp), %esi, %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ mulxl 16(%esp), %ebx, %edi # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ addl %esi, %edi
+ mulxl 8(%esp), %ebx, %esi # 4-byte Folded Reload
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %ebp, 16(%esp) # 4-byte Folded Spill
+ adcl %ecx, %edi
+ adcl %eax, %ebx
+ adcl $0, %esi
+ movl 20(%esp), %edx # 4-byte Reload
+ movl 16(%esp), %ecx # 4-byte Reload
+ imull %ecx, %edx
+ mulxl 32(%esp), %eax, %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ movl %edx, %eax
+ mulxl 28(%esp), %ecx, %ebp # 4-byte Folded Reload
+ adcl %edi, %ecx
+ mulxl 24(%esp), %eax, %edx # 4-byte Folded Reload
+ adcl %ebx, %eax
+ adcl $0, %esi
+ addl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl %ebp, %eax
+ adcl %edx, %esi
+ movl %ecx, %ebp
+ subl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, %edi
+ sbbl 28(%esp), %edi # 4-byte Folded Reload
+ movl %esi, %edx
+ sbbl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %ebx
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ js .LBB39_2
+# BB#1:
+ movl %ebp, %ecx
+.LBB39_2:
+ movl 56(%esp), %ebx
+ movl %ecx, (%ebx)
+ js .LBB39_4
+# BB#3:
+ movl %edi, %eax
+.LBB39_4:
+ movl %eax, 4(%ebx)
+ js .LBB39_6
+# BB#5:
+ movl %edx, %esi
+.LBB39_6:
+ movl %esi, 8(%ebx)
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end39:
+ .size mcl_fp_montNF3Lbmi2, .Lfunc_end39-mcl_fp_montNF3Lbmi2
+
+ .globl mcl_fp_montRed3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed3Lbmi2,@function
+mcl_fp_montRed3Lbmi2: # @mcl_fp_montRed3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 64(%esp), %ecx
+ movl -4(%ecx), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl (%ecx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax
+ movl (%eax), %ebx
+ movl %ebx, %edx
+ imull %edi, %edx
+ movl 8(%ecx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 4(%ecx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ mulxl %edi, %edi, %eax
+ movl %edi, 16(%esp) # 4-byte Spill
+ mulxl %ecx, %ebp, %edi
+ mulxl %esi, %edx, %ecx
+ addl %ebp, %ecx
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %eax
+ addl %ebx, %edx
+ movl 60(%esp), %edx
+ adcl 4(%edx), %ecx
+ adcl 8(%edx), %edi
+ adcl 12(%edx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ movl 16(%edx), %edx
+ adcl $0, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl %ecx, %edx
+ imull 20(%esp), %edx # 4-byte Folded Reload
+ mulxl 28(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, (%esp) # 4-byte Spill
+ mulxl 24(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %ebp, 4(%esp) # 4-byte Spill
+ addl %esi, %eax
+ mulxl 32(%esp), %esi, %ebp # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl %ecx, 4(%esp) # 4-byte Folded Spill
+ adcl %edi, %eax
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, 16(%esp) # 4-byte Folded Spill
+ adcl $0, %ebx
+ movl 20(%esp), %edx # 4-byte Reload
+ imull %eax, %edx
+ mulxl 24(%esp), %ecx, %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ mulxl 28(%esp), %edi, %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ addl 8(%esp), %edi # 4-byte Folded Reload
+ movl %ecx, %edx
+ mulxl 32(%esp), %ecx, %edx # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %edx
+ addl %eax, 20(%esp) # 4-byte Folded Spill
+ adcl %esi, %edi
+ adcl %ebp, %ecx
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl %edi, %ebp
+ subl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ecx, %esi
+ sbbl 28(%esp), %esi # 4-byte Folded Reload
+ movl %edx, %eax
+ sbbl 32(%esp), %eax # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB40_2
+# BB#1:
+ movl %ebp, %edi
+.LBB40_2:
+ movl 56(%esp), %ebp
+ movl %edi, (%ebp)
+ testb %bl, %bl
+ jne .LBB40_4
+# BB#3:
+ movl %esi, %ecx
+.LBB40_4:
+ movl %ecx, 4(%ebp)
+ jne .LBB40_6
+# BB#5:
+ movl %eax, %edx
+.LBB40_6:
+ movl %edx, 8(%ebp)
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end40:
+ .size mcl_fp_montRed3Lbmi2, .Lfunc_end40-mcl_fp_montRed3Lbmi2
+
+ .globl mcl_fp_addPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre3Lbmi2,@function
+mcl_fp_addPre3Lbmi2: # @mcl_fp_addPre3Lbmi2
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 12(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %eax
+ adcl 8(%esi), %eax
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl %edx, 4(%esi)
+ movl %eax, 8(%esi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end41:
+ .size mcl_fp_addPre3Lbmi2, .Lfunc_end41-mcl_fp_addPre3Lbmi2
+
+ .globl mcl_fp_subPre3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre3Lbmi2,@function
+mcl_fp_subPre3Lbmi2: # @mcl_fp_subPre3Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ xorl %eax, %eax
+ movl 20(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl 12(%esp), %edi
+ movl %edx, (%edi)
+ movl %esi, 4(%edi)
+ movl %ecx, 8(%edi)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end42:
+ .size mcl_fp_subPre3Lbmi2, .Lfunc_end42-mcl_fp_subPre3Lbmi2
+
+ .globl mcl_fp_shr1_3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_3Lbmi2,@function
+mcl_fp_shr1_3Lbmi2: # @mcl_fp_shr1_3Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl 8(%eax), %ecx
+ movl (%eax), %edx
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl 8(%esp), %esi
+ movl %edx, (%esi)
+ shrdl $1, %ecx, %eax
+ movl %eax, 4(%esi)
+ shrl %ecx
+ movl %ecx, 8(%esi)
+ popl %esi
+ retl
+.Lfunc_end43:
+ .size mcl_fp_shr1_3Lbmi2, .Lfunc_end43-mcl_fp_shr1_3Lbmi2
+
+ .globl mcl_fp_add3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add3Lbmi2,@function
+mcl_fp_add3Lbmi2: # @mcl_fp_add3Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %ecx
+ movl 20(%esp), %esi
+ addl (%esi), %eax
+ adcl 4(%esi), %ecx
+ movl 8(%edx), %edx
+ adcl 8(%esi), %edx
+ movl 16(%esp), %esi
+ movl %eax, (%esi)
+ movl %ecx, 4(%esi)
+ movl %edx, 8(%esi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 28(%esp), %edi
+ subl (%edi), %eax
+ sbbl 4(%edi), %ecx
+ sbbl 8(%edi), %edx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB44_2
+# BB#1: # %nocarry
+ movl %eax, (%esi)
+ movl %ecx, 4(%esi)
+ movl %edx, 8(%esi)
+.LBB44_2: # %carry
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end44:
+ .size mcl_fp_add3Lbmi2, .Lfunc_end44-mcl_fp_add3Lbmi2
+
+ .globl mcl_fp_addNF3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF3Lbmi2,@function
+mcl_fp_addNF3Lbmi2: # @mcl_fp_addNF3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 24(%esp), %esi
+ addl (%esi), %edx
+ adcl 4(%esi), %ecx
+ movl 8(%eax), %eax
+ adcl 8(%esi), %eax
+ movl 32(%esp), %ebp
+ movl %edx, %ebx
+ subl (%ebp), %ebx
+ movl %ecx, %edi
+ sbbl 4(%ebp), %edi
+ movl %eax, %esi
+ sbbl 8(%ebp), %esi
+ movl %esi, %ebp
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ js .LBB45_2
+# BB#1:
+ movl %ebx, %edx
+.LBB45_2:
+ movl 20(%esp), %ebx
+ movl %edx, (%ebx)
+ js .LBB45_4
+# BB#3:
+ movl %edi, %ecx
+.LBB45_4:
+ movl %ecx, 4(%ebx)
+ js .LBB45_6
+# BB#5:
+ movl %esi, %eax
+.LBB45_6:
+ movl %eax, 8(%ebx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end45:
+ .size mcl_fp_addNF3Lbmi2, .Lfunc_end45-mcl_fp_addNF3Lbmi2
+
+ .globl mcl_fp_sub3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub3Lbmi2,@function
+mcl_fp_sub3Lbmi2: # @mcl_fp_sub3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %edx
+ movl (%edx), %ecx
+ movl 4(%edx), %eax
+ xorl %ebx, %ebx
+ movl 28(%esp), %esi
+ subl (%esi), %ecx
+ sbbl 4(%esi), %eax
+ movl 8(%edx), %edx
+ sbbl 8(%esi), %edx
+ movl 20(%esp), %esi
+ movl %ecx, (%esi)
+ movl %eax, 4(%esi)
+ movl %edx, 8(%esi)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB46_2
+# BB#1: # %carry
+ movl 32(%esp), %edi
+ movl 4(%edi), %ebx
+ movl 8(%edi), %ebp
+ addl (%edi), %ecx
+ movl %ecx, (%esi)
+ adcl %eax, %ebx
+ movl %ebx, 4(%esi)
+ adcl %edx, %ebp
+ movl %ebp, 8(%esi)
+.LBB46_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end46:
+ .size mcl_fp_sub3Lbmi2, .Lfunc_end46-mcl_fp_sub3Lbmi2
+
+ .globl mcl_fp_subNF3Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF3Lbmi2,@function
+mcl_fp_subNF3Lbmi2: # @mcl_fp_subNF3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 28(%esp), %esi
+ subl (%esi), %ecx
+ sbbl 4(%esi), %edx
+ movl 8(%eax), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, %esi
+ sarl $31, %esi
+ movl %esi, %edi
+ shldl $1, %eax, %edi
+ movl 32(%esp), %ebx
+ andl (%ebx), %edi
+ movl 8(%ebx), %ebp
+ andl %esi, %ebp
+ andl 4(%ebx), %esi
+ addl %ecx, %edi
+ adcl %edx, %esi
+ movl 20(%esp), %ecx
+ movl %edi, (%ecx)
+ movl %esi, 4(%ecx)
+ adcl %eax, %ebp
+ movl %ebp, 8(%ecx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end47:
+ .size mcl_fp_subNF3Lbmi2, .Lfunc_end47-mcl_fp_subNF3Lbmi2
+
+ .globl mcl_fpDbl_add3Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add3Lbmi2,@function
+mcl_fpDbl_add3Lbmi2: # @mcl_fpDbl_add3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ pushl %eax
+ movl 32(%esp), %esi
+ movl 20(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 16(%esi), %edi
+ movl 12(%esi), %ebx
+ movl (%esi), %edx
+ movl 28(%esp), %eax
+ addl (%eax), %edx
+ movl 24(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%esi), %edx
+ movl 4(%esi), %esi
+ adcl 4(%eax), %esi
+ adcl 8(%eax), %edx
+ movl %esi, 4(%ecx)
+ movl 20(%eax), %ebp
+ movl %edx, 8(%ecx)
+ movl 12(%eax), %esi
+ movl 16(%eax), %edx
+ adcl %ebx, %esi
+ adcl %edi, %edx
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 36(%esp), %ecx
+ movl %esi, %ebx
+ subl (%ecx), %ebx
+ movl %edx, %edi
+ sbbl 4(%ecx), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl %ebp, %ecx
+ movl 36(%esp), %edi
+ sbbl 8(%edi), %ecx
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB48_2
+# BB#1:
+ movl %ecx, %ebp
+.LBB48_2:
+ testb %al, %al
+ jne .LBB48_4
+# BB#3:
+ movl %ebx, %esi
+.LBB48_4:
+ movl 24(%esp), %eax
+ movl %esi, 12(%eax)
+ jne .LBB48_6
+# BB#5:
+ movl (%esp), %edx # 4-byte Reload
+.LBB48_6:
+ movl %edx, 16(%eax)
+ movl %ebp, 20(%eax)
+ addl $4, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end48:
+ .size mcl_fpDbl_add3Lbmi2, .Lfunc_end48-mcl_fpDbl_add3Lbmi2
+
+ .globl mcl_fpDbl_sub3Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub3Lbmi2,@function
+mcl_fpDbl_sub3Lbmi2: # @mcl_fpDbl_sub3Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ movl 28(%esp), %ebx
+ subl (%ebx), %edx
+ sbbl 4(%ebx), %esi
+ movl 8(%ecx), %ebp
+ sbbl 8(%ebx), %ebp
+ movl 20(%esp), %eax
+ movl %edx, (%eax)
+ movl 12(%ecx), %edi
+ sbbl 12(%ebx), %edi
+ movl %esi, 4(%eax)
+ movl 16(%ecx), %esi
+ sbbl 16(%ebx), %esi
+ movl 20(%ebx), %ebx
+ movl 20(%ecx), %edx
+ movl %ebp, 8(%eax)
+ sbbl %ebx, %edx
+ movl $0, %ecx
+ sbbl $0, %ecx
+ andl $1, %ecx
+ movl 32(%esp), %ebp
+ jne .LBB49_1
+# BB#2:
+ xorl %ebx, %ebx
+ jmp .LBB49_3
+.LBB49_1:
+ movl 8(%ebp), %ebx
+.LBB49_3:
+ testb %cl, %cl
+ movl $0, %eax
+ jne .LBB49_4
+# BB#5:
+ xorl %ecx, %ecx
+ jmp .LBB49_6
+.LBB49_4:
+ movl (%ebp), %ecx
+ movl 4(%ebp), %eax
+.LBB49_6:
+ addl %edi, %ecx
+ adcl %esi, %eax
+ movl 20(%esp), %esi
+ movl %ecx, 12(%esi)
+ movl %eax, 16(%esi)
+ adcl %edx, %ebx
+ movl %ebx, 20(%esi)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end49:
+ .size mcl_fpDbl_sub3Lbmi2, .Lfunc_end49-mcl_fpDbl_sub3Lbmi2
+
+ .globl mcl_fp_mulUnitPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre4Lbmi2,@function
+mcl_fp_mulUnitPre4Lbmi2: # @mcl_fp_mulUnitPre4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %edx
+ movl 24(%esp), %eax
+ mulxl 4(%eax), %esi, %ecx
+ mulxl (%eax), %edi, %ebx
+ addl %esi, %ebx
+ mulxl 8(%eax), %ebp, %esi
+ adcl %ecx, %ebp
+ mulxl 12(%eax), %eax, %ecx
+ movl 20(%esp), %edx
+ movl %edi, (%edx)
+ movl %ebx, 4(%edx)
+ movl %ebp, 8(%edx)
+ adcl %esi, %eax
+ movl %eax, 12(%edx)
+ adcl $0, %ecx
+ movl %ecx, 16(%edx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end50:
+ .size mcl_fp_mulUnitPre4Lbmi2, .Lfunc_end50-mcl_fp_mulUnitPre4Lbmi2
+
+ .globl mcl_fpDbl_mulPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre4Lbmi2,@function
+mcl_fpDbl_mulPre4Lbmi2: # @mcl_fpDbl_mulPre4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 52(%esp), %eax
+ movl (%eax), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx
+ movl (%ecx), %ebp
+ mulxl %ebp, %esi, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ movl %ebx, %ecx
+ mulxl %ebp, %edx, %ebx
+ movl %edx, 8(%esp) # 4-byte Spill
+ addl %esi, %ebx
+ movl 8(%eax), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %esi
+ mulxl %ebp, %eax, %edi
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl 12(%esi), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ mulxl %ebp, %ebp, %esi
+ adcl %edi, %ebp
+ movl 48(%esp), %edx
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, (%edx)
+ adcl $0, %esi
+ movl 56(%esp), %edx
+ movl 4(%edx), %edi
+ movl %ecx, %edx
+ mulxl %edi, %ecx, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ addl %ebx, %ecx
+ movl 24(%esp), %edx # 4-byte Reload
+ mulxl %edi, %ebx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl %eax, %ebx
+ movl 20(%esp), %edx # 4-byte Reload
+ mulxl %edi, %eax, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ movl 16(%esp), %edx # 4-byte Reload
+ mulxl %edi, %edi, %edx
+ adcl %esi, %edi
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ adcl (%esp), %edi # 4-byte Folded Reload
+ adcl %edx, %ebp
+ movl 48(%esp), %edx
+ movl %ecx, 4(%edx)
+ movl 56(%esp), %ecx
+ movl 8(%ecx), %ecx
+ movl 12(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %edx, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ addl %ebx, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %ebx, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl %eax, %ebx
+ movl 20(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %esi, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl 16(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %edi, %eax
+ adcl %ebp, %edi
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ adcl %eax, %ebp
+ movl 48(%esp), %eax
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 56(%esp), %eax
+ movl 12(%eax), %edx
+ movl 52(%esp), %eax
+ mulxl (%eax), %ecx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ addl %ebx, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx
+ mulxl 4(%ebx), %ecx, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ mulxl 8(%ebx), %eax, %esi
+ adcl %edi, %eax
+ mulxl 12(%ebx), %edi, %edx
+ adcl %ebp, %edi
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ adcl %esi, %edi
+ movl 48(%esp), %esi
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%esi)
+ movl %ecx, 16(%esi)
+ movl %eax, 20(%esi)
+ movl %edi, 24(%esi)
+ adcl %edx, %ebp
+ movl %ebp, 28(%esi)
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end51:
+ .size mcl_fpDbl_mulPre4Lbmi2, .Lfunc_end51-mcl_fpDbl_mulPre4Lbmi2
+
+ .globl mcl_fpDbl_sqrPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre4Lbmi2,@function
+mcl_fpDbl_sqrPre4Lbmi2: # @mcl_fpDbl_sqrPre4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 60(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edi
+ movl 56(%esp), %ebx
+ movl %esi, %edx
+ mulxl %esi, %eax, %ebp
+ movl %eax, (%ebx)
+ movl %edi, %edx
+ mulxl %esi, %edx, %ecx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ addl %edx, %eax
+ movl 60(%esp), %edx
+ movl 8(%edx), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ mulxl %esi, %edx, %ebx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl %edx, %ebp
+ movl 60(%esp), %ecx
+ movl 12(%ecx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %esi, %esi, %ecx
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ addl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl %edi, %ebx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %edi, %ebp, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl (%esp), %edx # 4-byte Reload
+ mulxl %edi, %edi, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ adcl %ecx, %ebp
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl 16(%esp), %ebx # 4-byte Folded Reload
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ adcl %eax, %ebp
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ addl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl %edi, %esi
+ mulxl %edx, %edi, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edx, %eax
+ adcl %ebp, %edi
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebp, %edx
+ adcl %ecx, %ebp
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%eax)
+ movl %ebx, 8(%eax)
+ movl 60(%esp), %eax
+ movl 12(%eax), %edx
+ mulxl (%eax), %ebx, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ addl %esi, %ebx
+ mulxl 4(%eax), %esi, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ mulxl 8(%eax), %ecx, %edi
+ adcl %ebp, %ecx
+ mulxl %edx, %ebp, %edx
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 32(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ adcl %edi, %ebp
+ movl 56(%esp), %edi
+ movl %ebx, 12(%edi)
+ movl %esi, 16(%edi)
+ movl %ecx, 20(%edi)
+ movl %ebp, 24(%edi)
+ adcl %edx, %eax
+ movl %eax, 28(%edi)
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end52:
+ .size mcl_fpDbl_sqrPre4Lbmi2, .Lfunc_end52-mcl_fpDbl_sqrPre4Lbmi2
+
+ .globl mcl_fp_mont4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont4Lbmi2,@function
+mcl_fp_mont4Lbmi2: # @mcl_fp_mont4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+ movl 88(%esp), %eax
+ movl 12(%eax), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx
+ movl (%ecx), %ecx
+ movl 8(%eax), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl (%eax), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 4(%eax), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ mulxl %ecx, %eax, %ebp
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl %ecx, %edx, %eax
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl %ecx, %ebx, %esi
+ movl %edi, %edx
+ mulxl %ecx, %edx, %ecx
+ movl %edx, 8(%esp) # 4-byte Spill
+ addl %ebx, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 96(%esp), %ebx
+ movl -4(%ebx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ imull %eax, %edx
+ movl (%ebx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 4(%ebx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ mulxl %ecx, %esi, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %eax
+ addl %esi, %eax
+ movl %eax, %ebp
+ movl 8(%ebx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ mulxl %eax, %edi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl 12(%ebx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl %eax, %esi, %ebx
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %ebx
+ addl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl 4(%eax), %edx
+ mulxl 40(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ mulxl 28(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ mulxl 32(%esp), %ecx, %ebp # 4-byte Folded Reload
+ addl (%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ adcl %esi, 12(%esp) # 4-byte Folded Spill
+ adcl %ebx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ imull 44(%esp), %edx # 4-byte Folded Reload
+ mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 56(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ mulxl 52(%esp), %ebx, %ecx # 4-byte Folded Reload
+ adcl %eax, %ebx
+ mulxl 48(%esp), %edi, %eax # 4-byte Folded Reload
+ adcl %ecx, %edi
+ adcl $0, %eax
+ movl 16(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ movl 4(%esp), %edx # 4-byte Reload
+ addl 8(%esp), %edx # 4-byte Folded Reload
+ adcl %ebp, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 92(%esp), %edx
+ movl 8(%edx), %edx
+ mulxl 40(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ mulxl 28(%esp), %esi, %eax # 4-byte Folded Reload
+ mulxl 32(%esp), %ebp, %ecx # 4-byte Folded Reload
+ addl %esi, %ecx
+ movl %ecx, %esi
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl %edi, %ecx
+ movl %ecx, %edi
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ imull 44(%esp), %edx # 4-byte Folded Reload
+ mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 56(%esp), %esi, %ebx # 4-byte Folded Reload
+ movl %esi, 8(%esp) # 4-byte Spill
+ addl %ecx, %ebx
+ mulxl 52(%esp), %ecx, %esi # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ adcl %eax, %ecx
+ mulxl 48(%esp), %eax, %esi # 4-byte Folded Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %esi
+ movl 16(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ addl %ebp, 8(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ adcl %edi, %ecx
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 92(%esp), %edx
+ movl 12(%edx), %edx
+ mulxl 28(%esp), %ebp, %edi # 4-byte Folded Reload
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl %edi, 24(%esp) # 4-byte Spill
+ mulxl 32(%esp), %edi, %ebp # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ addl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 28(%esp) # 4-byte Spill
+ mulxl 40(%esp), %ebp, %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ mulxl 36(%esp), %edi, %edx # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ adcl %ebp, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ movl 32(%esp), %ebp # 4-byte Reload
+ addl %ebx, %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl %ecx, 28(%esp) # 4-byte Folded Spill
+ adcl %eax, %edi
+ adcl %esi, 36(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ movl 44(%esp), %edx # 4-byte Reload
+ imull %ebp, %edx
+ mulxl 56(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ mulxl 60(%esp), %ebp, %eax # 4-byte Folded Reload
+ addl %esi, %ebp
+ mulxl 52(%esp), %esi, %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl %eax, %esi
+ movl %ebx, %edx
+ mulxl 48(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ andl $1, %ecx
+ movl 44(%esp), %ebx # 4-byte Reload
+ addl 32(%esp), %ebx # 4-byte Folded Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ adcl %edi, %esi
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %ecx
+ movl %ebp, %edi
+ subl 56(%esp), %edi # 4-byte Folded Reload
+ movl %esi, %ebx
+ sbbl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ sbbl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ sbbl 48(%esp), %ebx # 4-byte Folded Reload
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB53_2
+# BB#1:
+ movl %edi, %ebp
+.LBB53_2:
+ movl 84(%esp), %edi
+ movl %ebp, (%edi)
+ testb %cl, %cl
+ jne .LBB53_4
+# BB#3:
+ movl 56(%esp), %esi # 4-byte Reload
+.LBB53_4:
+ movl %esi, 4(%edi)
+ jne .LBB53_6
+# BB#5:
+ movl 60(%esp), %edx # 4-byte Reload
+.LBB53_6:
+ movl %edx, 8(%edi)
+ jne .LBB53_8
+# BB#7:
+ movl %ebx, %eax
+.LBB53_8:
+ movl %eax, 12(%edi)
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end53:
+ .size mcl_fp_mont4Lbmi2, .Lfunc_end53-mcl_fp_mont4Lbmi2
+
+ .globl mcl_fp_montNF4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF4Lbmi2,@function
+mcl_fp_montNF4Lbmi2: # @mcl_fp_montNF4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl 76(%esp), %esi
+ movl (%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 4(%esi), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx
+ movl (%ecx), %ecx
+ mulxl %ecx, %edi, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %edx
+ mulxl %ecx, %ebp, %eax
+ movl %ebp, 40(%esp) # 4-byte Spill
+ addl %edi, %eax
+ movl 8(%esi), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ mulxl %ecx, %ebx, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl 12(%esi), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ mulxl %ecx, %esi, %edi
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %edi
+ movl 84(%esp), %ecx
+ movl -4(%ecx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ imull %ecx, %edx
+ movl 84(%esp), %ecx
+ movl (%ecx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ mulxl %ecx, %ecx, %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ addl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %ecx
+ movl 4(%ecx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ mulxl %ecx, %ecx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl %eax, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %eax
+ adcl %ebx, %ecx
+ movl %ecx, %ebp
+ movl 84(%esp), %ecx
+ movl 12(%ecx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ mulxl %ecx, %ebx, %edx
+ adcl %esi, %ebx
+ adcl $0, %edi
+ movl 8(%esp), %ecx # 4-byte Reload
+ addl %ecx, 12(%esp) # 4-byte Folded Spill
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl %eax, %ebx
+ adcl %edx, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl 4(%eax), %edx
+ mulxl 24(%esp), %esi, %edi # 4-byte Folded Reload
+ mulxl 28(%esp), %ecx, %eax # 4-byte Folded Reload
+ addl %esi, %eax
+ mulxl 16(%esp), %ebp, %esi # 4-byte Folded Reload
+ movl %esi, (%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ mulxl 20(%esp), %edi, %esi # 4-byte Folded Reload
+ adcl (%esp), %edi # 4-byte Folded Reload
+ adcl $0, %esi
+ addl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ adcl %ebx, %ebp
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %esi
+ movl %ecx, %edx
+ imull 48(%esp), %edx # 4-byte Folded Reload
+ mulxl 44(%esp), %ebx, %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ addl (%esp), %ebx # 4-byte Folded Reload
+ mulxl 40(%esp), %ebx, %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl %eax, %ebx
+ movl %ebx, %eax
+ mulxl 36(%esp), %ebx, %ecx # 4-byte Folded Reload
+ adcl %ebp, %ebx
+ movl %ebx, %ebp
+ mulxl 32(%esp), %ebx, %edx # 4-byte Folded Reload
+ adcl %edi, %ebx
+ adcl $0, %esi
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl %ecx, %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ adcl %edx, %esi
+ movl 80(%esp), %ecx
+ movl 8(%ecx), %edx
+ mulxl 24(%esp), %ecx, %ebx # 4-byte Folded Reload
+ mulxl 28(%esp), %eax, %ebp # 4-byte Folded Reload
+ addl %ecx, %ebp
+ mulxl 16(%esp), %edi, %ecx # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ mulxl 20(%esp), %ebx, %ecx # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ecx
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp) # 4-byte Spill
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ adcl %esi, %ebx
+ adcl $0, %ecx
+ movl %eax, %edx
+ imull 48(%esp), %edx # 4-byte Folded Reload
+ mulxl 44(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ addl (%esp), %eax # 4-byte Folded Reload
+ mulxl 40(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ mulxl 36(%esp), %ebp, %esi # 4-byte Folded Reload
+ movl %esi, (%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ mulxl 32(%esp), %esi, %edx # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl %edx, %ecx
+ movl 80(%esp), %eax
+ movl 12(%eax), %edx
+ mulxl 24(%esp), %ebx, %ebp # 4-byte Folded Reload
+ mulxl 28(%esp), %edi, %eax # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ mulxl 16(%esp), %edi, %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ adcl %ebp, %edi
+ mulxl 20(%esp), %ebp, %ebx # 4-byte Folded Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl 28(%esp), %edx # 4-byte Reload
+ addl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ adcl %esi, %edi
+ adcl %ecx, %ebp
+ adcl $0, %ebx
+ movl 48(%esp), %edx # 4-byte Reload
+ imull 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ mulxl 44(%esp), %ecx, %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ addl 28(%esp), %ecx # 4-byte Folded Reload
+ mulxl 40(%esp), %esi, %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl %eax, %esi
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl %edi, %ecx
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl 32(%esp), %eax, %edx # 4-byte Folded Reload
+ adcl %ebp, %eax
+ adcl $0, %ebx
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ adcl %edx, %ebx
+ movl %esi, %edi
+ subl 44(%esp), %edi # 4-byte Folded Reload
+ movl %ecx, %ebp
+ sbbl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, %edx
+ sbbl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ sbbl 32(%esp), %edx # 4-byte Folded Reload
+ testl %edx, %edx
+ js .LBB54_2
+# BB#1:
+ movl %edi, %esi
+.LBB54_2:
+ movl 72(%esp), %edi
+ movl %esi, (%edi)
+ js .LBB54_4
+# BB#3:
+ movl %ebp, %ecx
+.LBB54_4:
+ movl %ecx, 4(%edi)
+ js .LBB54_6
+# BB#5:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB54_6:
+ movl %eax, 8(%edi)
+ js .LBB54_8
+# BB#7:
+ movl %edx, %ebx
+.LBB54_8:
+ movl %ebx, 12(%edi)
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end54:
+ .size mcl_fp_montNF4Lbmi2, .Lfunc_end54-mcl_fp_montNF4Lbmi2
+
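+# mcl_fp_montRed4Lbmi2: Montgomery reduction of a double-width (8-limb) input
+# modulo a 4-limb prime p, using mulx and the inverse word read from -4(p)
+# (n' = -p^-1 mod 2^32); semantics assumed from the mcl naming convention.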
+ .globl mcl_fp_montRed4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed4Lbmi2,@function
+mcl_fp_montRed4Lbmi2: # @mcl_fp_montRed4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl 80(%esp), %ecx
+ movl -4(%ecx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl (%ecx), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp
+ movl (%ebp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ imull %eax, %edx
+ movl 12(%ecx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 8(%ecx), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 4(%ecx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl %esi, %esi, %ecx
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl %ecx, 24(%esp) # 4-byte Spill
+ mulxl %ebx, %esi, %ecx
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl %ecx, 20(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %ecx
+ mulxl %edi, %edx, %esi
+ addl %ebx, %esi
+ movl %ecx, %edi
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl 20(%esp), %ebx # 4-byte Reload
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 36(%esp), %edx # 4-byte Folded Reload
+ adcl 4(%ebp), %esi
+ adcl 8(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 12(%ebp), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ adcl 16(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 28(%ebp), %ecx
+ movl 24(%ebp), %edx
+ movl 20(%ebp), %edi
+ adcl $0, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl %esi, %edx
+ imull 40(%esp), %edx # 4-byte Folded Reload
+ mulxl %eax, %ebp, %edi
+ mulxl 44(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ addl %ebp, %eax
+ mulxl 48(%esp), %ebp, %ecx # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ movl 28(%esp), %ecx # 4-byte Reload
+ mulxl %ecx, %edi, %edx
+ adcl (%esp), %edi # 4-byte Folded Reload
+ adcl $0, %edx
+ addl %esi, 4(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl $0, 16(%esp) # 4-byte Folded Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ adcl $0, %ebx
+ movl %eax, %edx
+ imull 40(%esp), %edx # 4-byte Folded Reload
+ mulxl %ecx, %esi, %ecx
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl %ecx, 24(%esp) # 4-byte Spill
+ mulxl 32(%esp), %esi, %ecx # 4-byte Folded Reload
+ movl %esi, (%esp) # 4-byte Spill
+ movl %ecx, 4(%esp) # 4-byte Spill
+ mulxl 44(%esp), %esi, %ecx # 4-byte Folded Reload
+ movl %esi, 8(%esp) # 4-byte Spill
+ addl (%esp), %ecx # 4-byte Folded Reload
+ mulxl 48(%esp), %esi, %edx # 4-byte Folded Reload
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %eax, 8(%esp) # 4-byte Folded Spill
+ adcl %ebp, %ecx
+ adcl %edi, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 20(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ adcl $0, %ebx
+ movl 40(%esp), %edx # 4-byte Reload
+ imull %ecx, %edx
+ mulxl 44(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ mulxl 32(%esp), %ebp, %esi # 4-byte Folded Reload
+ addl %eax, %ebp
+ movl %edx, %eax
+ mulxl 48(%esp), %edi, %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ movl %eax, %edx
+ mulxl 28(%esp), %edx, %esi # 4-byte Folded Reload
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %ecx, 40(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl %ebp, %ecx
+ subl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, %eax
+ sbbl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %eax
+ sbbl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ sbbl 28(%esp), %eax # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB55_2
+# BB#1:
+ movl %ecx, %ebp
+.LBB55_2:
+ movl 72(%esp), %ecx
+ movl %ebp, (%ecx)
+ testb %bl, %bl
+ jne .LBB55_4
+# BB#3:
+ movl 44(%esp), %edi # 4-byte Reload
+.LBB55_4:
+ movl %edi, 4(%ecx)
+ jne .LBB55_6
+# BB#5:
+ movl 48(%esp), %edx # 4-byte Reload
+.LBB55_6:
+ movl %edx, 8(%ecx)
+ jne .LBB55_8
+# BB#7:
+ movl %eax, %esi
+.LBB55_8:
+ movl %esi, 12(%ecx)
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end55:
+ .size mcl_fp_montRed4Lbmi2, .Lfunc_end55-mcl_fp_montRed4Lbmi2
+
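+# mcl_fp_addPre4Lbmi2: 4-limb addition with no modular reduction; the final
+# carry bit is returned in %eax.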
+ .globl mcl_fp_addPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre4Lbmi2,@function
+mcl_fp_addPre4Lbmi2: # @mcl_fp_addPre4Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 20(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 12(%eax), %edi
+ movl 8(%eax), %eax
+ adcl 8(%esi), %eax
+ movl 12(%esi), %esi
+ movl 16(%esp), %ebx
+ movl %ecx, (%ebx)
+ movl %edx, 4(%ebx)
+ movl %eax, 8(%ebx)
+ adcl %edi, %esi
+ movl %esi, 12(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end56:
+ .size mcl_fp_addPre4Lbmi2, .Lfunc_end56-mcl_fp_addPre4Lbmi2
+
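+# mcl_fp_subPre4Lbmi2: 4-limb subtraction with no modular reduction; the final
+# borrow bit is returned in %eax.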
+ .globl mcl_fp_subPre4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre4Lbmi2,@function
+mcl_fp_subPre4Lbmi2: # @mcl_fp_subPre4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ xorl %eax, %eax
+ movl 28(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edi), %ebx
+ movl 12(%edi), %edi
+ movl 12(%ecx), %ecx
+ movl 20(%esp), %ebp
+ movl %edx, (%ebp)
+ movl %esi, 4(%ebp)
+ movl %ebx, 8(%ebp)
+ sbbl %edi, %ecx
+ movl %ecx, 12(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end57:
+ .size mcl_fp_subPre4Lbmi2, .Lfunc_end57-mcl_fp_subPre4Lbmi2
+
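+# mcl_fp_shr1_4Lbmi2: one-bit logical right shift (halving) of a 4-limb value,
+# carried across limbs with shrdl.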
+ .globl mcl_fp_shr1_4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_4Lbmi2,@function
+mcl_fp_shr1_4Lbmi2: # @mcl_fp_shr1_4Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl 12(%eax), %ecx
+ movl 8(%eax), %edx
+ movl (%eax), %esi
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %esi
+ movl 12(%esp), %edi
+ movl %esi, (%edi)
+ shrdl $1, %edx, %eax
+ movl %eax, 4(%edi)
+ shrdl $1, %ecx, %edx
+ movl %edx, 8(%edi)
+ shrl %ecx
+ movl %ecx, 12(%edi)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end58:
+ .size mcl_fp_shr1_4Lbmi2, .Lfunc_end58-mcl_fp_shr1_4Lbmi2
+
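+# mcl_fp_add4Lbmi2: modular addition of two 4-limb values; the raw sum is
+# stored, the modulus is subtracted, and the reduced value is kept when the
+# subtraction does not borrow.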
+ .globl mcl_fp_add4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add4Lbmi2,@function
+mcl_fp_add4Lbmi2: # @mcl_fp_add4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ movl 24(%esp), %esi
+ addl (%esi), %eax
+ adcl 4(%esi), %ecx
+ movl 8(%edi), %edx
+ adcl 8(%esi), %edx
+ movl 12(%esi), %esi
+ adcl 12(%edi), %esi
+ movl 20(%esp), %edi
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ movl %edx, 8(%edi)
+ movl %esi, 12(%edi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 32(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %ecx
+ sbbl 8(%ebp), %edx
+ sbbl 12(%ebp), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB59_2
+# BB#1: # %nocarry
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ movl %edx, 8(%edi)
+ movl %esi, 12(%edi)
+.LBB59_2: # %carry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end59:
+ .size mcl_fp_add4Lbmi2, .Lfunc_end59-mcl_fp_add4Lbmi2
+
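+# mcl_fp_addNF4Lbmi2: modular addition, "NF" variant (name per the mcl
+# convention, assumed): the modulus is subtracted and each limb of the result
+# is selected on the sign of the top limb of the difference.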
+ .globl mcl_fp_addNF4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF4Lbmi2,@function
+mcl_fp_addNF4Lbmi2: # @mcl_fp_addNF4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $8, %esp
+ movl 36(%esp), %edx
+ movl (%edx), %esi
+ movl 4(%edx), %ecx
+ movl 32(%esp), %edi
+ addl (%edi), %esi
+ adcl 4(%edi), %ecx
+ movl 12(%edx), %ebp
+ movl 8(%edx), %edx
+ adcl 8(%edi), %edx
+ adcl 12(%edi), %ebp
+ movl 40(%esp), %eax
+ movl %esi, %ebx
+ subl (%eax), %ebx
+ movl %ecx, %edi
+ sbbl 4(%eax), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl 40(%esp), %eax
+ sbbl 8(%eax), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ movl 40(%esp), %eax
+ sbbl 12(%eax), %edi
+ testl %edi, %edi
+ js .LBB60_2
+# BB#1:
+ movl %ebx, %esi
+.LBB60_2:
+ movl 28(%esp), %ebx
+ movl %esi, (%ebx)
+ js .LBB60_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB60_4:
+ movl %ecx, 4(%ebx)
+ js .LBB60_6
+# BB#5:
+ movl 4(%esp), %edx # 4-byte Reload
+.LBB60_6:
+ movl %edx, 8(%ebx)
+ js .LBB60_8
+# BB#7:
+ movl %edi, %ebp
+.LBB60_8:
+ movl %ebp, 12(%ebx)
+ addl $8, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end60:
+ .size mcl_fp_addNF4Lbmi2, .Lfunc_end60-mcl_fp_addNF4Lbmi2
+
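+# mcl_fp_sub4Lbmi2: modular subtraction of two 4-limb values; if the
+# subtraction borrows, the modulus is added back before the result is stored.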
+ .globl mcl_fp_sub4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub4Lbmi2,@function
+mcl_fp_sub4Lbmi2: # @mcl_fp_sub4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 28(%esp), %edi
+ subl (%edi), %eax
+ sbbl 4(%edi), %ecx
+ movl 8(%esi), %edx
+ sbbl 8(%edi), %edx
+ movl 12(%esi), %esi
+ sbbl 12(%edi), %esi
+ movl 20(%esp), %edi
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ movl %edx, 8(%edi)
+ movl %esi, 12(%edi)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB61_2
+# BB#1: # %carry
+ movl 32(%esp), %ebx
+ addl (%ebx), %eax
+ movl 8(%ebx), %ebp
+ adcl 4(%ebx), %ecx
+ movl 12(%ebx), %ebx
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ adcl %edx, %ebp
+ movl %ebp, 8(%edi)
+ adcl %esi, %ebx
+ movl %ebx, 12(%edi)
+.LBB61_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end61:
+ .size mcl_fp_sub4Lbmi2, .Lfunc_end61-mcl_fp_sub4Lbmi2
+
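+# mcl_fp_subNF4Lbmi2: branch-free modular subtraction; the sign of the
+# difference (sarl $31) forms a mask that selects whether the modulus is added
+# back.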
+ .globl mcl_fp_subNF4Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF4Lbmi2,@function
+mcl_fp_subNF4Lbmi2: # @mcl_fp_subNF4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $8, %esp
+ movl 32(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 36(%esp), %esi
+ subl (%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ sbbl 4(%esi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 12(%eax), %edi
+ movl 8(%eax), %edx
+ sbbl 8(%esi), %edx
+ sbbl 12(%esi), %edi
+ movl %edi, %esi
+ sarl $31, %esi
+ movl 40(%esp), %eax
+ movl 12(%eax), %ebp
+ andl %esi, %ebp
+ movl 8(%eax), %ecx
+ andl %esi, %ecx
+ movl 40(%esp), %eax
+ movl 4(%eax), %eax
+ andl %esi, %eax
+ movl 40(%esp), %ebx
+ andl (%ebx), %esi
+ addl (%esp), %esi # 4-byte Folded Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl 28(%esp), %ebx
+ movl %esi, (%ebx)
+ adcl %edx, %ecx
+ movl %eax, 4(%ebx)
+ movl %ecx, 8(%ebx)
+ adcl %edi, %ebp
+ movl %ebp, 12(%ebx)
+ addl $8, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end62:
+ .size mcl_fp_subNF4Lbmi2, .Lfunc_end62-mcl_fp_subNF4Lbmi2
+
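+# mcl_fpDbl_add4Lbmi2: addition of two 8-limb (double-width) values; only the
+# upper 4 limbs are conditionally reduced modulo p, per the mcl fpDbl
+# convention (assumed).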
+ .globl mcl_fpDbl_add4Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add4Lbmi2,@function
+mcl_fpDbl_add4Lbmi2: # @mcl_fpDbl_add4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $12, %esp
+ movl 40(%esp), %eax
+ movl (%eax), %edi
+ movl 4(%eax), %edx
+ movl 36(%esp), %esi
+ addl (%esi), %edi
+ adcl 4(%esi), %edx
+ movl 8(%eax), %ebx
+ adcl 8(%esi), %ebx
+ movl 12(%esi), %ebp
+ movl 32(%esp), %ecx
+ movl %edi, (%ecx)
+ movl 16(%esi), %edi
+ adcl 12(%eax), %ebp
+ adcl 16(%eax), %edi
+ movl %edx, 4(%ecx)
+ movl 28(%eax), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %ebx, 8(%ecx)
+ movl 24(%eax), %ebx
+ movl 20(%eax), %eax
+ movl %ebp, 12(%ecx)
+ movl 20(%esi), %edx
+ adcl %eax, %edx
+ movl 28(%esi), %ecx
+ movl 24(%esi), %ebp
+ adcl %ebx, %ebp
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 44(%esp), %eax
+ movl %edi, %esi
+ subl (%eax), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl %edx, %esi
+ sbbl 4(%eax), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ sbbl 8(%eax), %esi
+ sbbl 12(%eax), %ecx
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB63_2
+# BB#1:
+ movl %esi, %ebp
+.LBB63_2:
+ testb %bl, %bl
+ jne .LBB63_4
+# BB#3:
+ movl (%esp), %edi # 4-byte Reload
+.LBB63_4:
+ movl 32(%esp), %eax
+ movl %edi, 16(%eax)
+ jne .LBB63_6
+# BB#5:
+ movl 4(%esp), %edx # 4-byte Reload
+.LBB63_6:
+ movl %edx, 20(%eax)
+ movl %ebp, 24(%eax)
+ movl 8(%esp), %edx # 4-byte Reload
+ jne .LBB63_8
+# BB#7:
+ movl %ecx, %edx
+.LBB63_8:
+ movl %edx, 28(%eax)
+ addl $12, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end63:
+ .size mcl_fpDbl_add4Lbmi2, .Lfunc_end63-mcl_fpDbl_add4Lbmi2
+
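+# mcl_fpDbl_sub4Lbmi2: subtraction of two 8-limb values; on borrow the modulus
+# is added back into the upper 4 limbs, selected via the borrow flag.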
+ .globl mcl_fpDbl_sub4Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub4Lbmi2,@function
+mcl_fpDbl_sub4Lbmi2: # @mcl_fpDbl_sub4Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ pushl %eax
+ movl 28(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 32(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %esi
+ movl 8(%eax), %ebx
+ sbbl 8(%ebp), %ebx
+ movl 24(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 12(%eax), %edx
+ sbbl 12(%ebp), %edx
+ movl %esi, 4(%ecx)
+ movl 16(%eax), %edi
+ sbbl 16(%ebp), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%ebp), %esi
+ movl %edx, 12(%ecx)
+ movl 20(%eax), %ebx
+ sbbl %esi, %ebx
+ movl 24(%ebp), %edx
+ movl 24(%eax), %esi
+ sbbl %edx, %esi
+ movl 28(%ebp), %edx
+ movl 28(%eax), %eax
+ sbbl %edx, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl $0, %edx
+ sbbl $0, %edx
+ andl $1, %edx
+ movl 36(%esp), %ecx
+ movl (%ecx), %eax
+ jne .LBB64_1
+# BB#2:
+ xorl %ebp, %ebp
+ jmp .LBB64_3
+.LBB64_1:
+ movl 4(%ecx), %ebp
+.LBB64_3:
+ testb %dl, %dl
+ jne .LBB64_5
+# BB#4:
+ movl $0, %eax
+.LBB64_5:
+ jne .LBB64_6
+# BB#7:
+ movl $0, %edx
+ jmp .LBB64_8
+.LBB64_6:
+ movl 12(%ecx), %edx
+.LBB64_8:
+ jne .LBB64_9
+# BB#10:
+ xorl %ecx, %ecx
+ jmp .LBB64_11
+.LBB64_9:
+ movl 8(%ecx), %ecx
+.LBB64_11:
+ addl %edi, %eax
+ adcl %ebx, %ebp
+ movl 24(%esp), %edi
+ movl %eax, 16(%edi)
+ adcl %esi, %ecx
+ movl %ebp, 20(%edi)
+ movl %ecx, 24(%edi)
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%edi)
+ addl $4, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end64:
+ .size mcl_fpDbl_sub4Lbmi2, .Lfunc_end64-mcl_fpDbl_sub4Lbmi2
+
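+# mcl_fp_mulUnitPre5Lbmi2: multiplies a 5-limb value by a single 32-bit word
+# with mulx, producing a 6-limb result (no reduction).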
+ .globl mcl_fp_mulUnitPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre5Lbmi2,@function
+mcl_fp_mulUnitPre5Lbmi2: # @mcl_fp_mulUnitPre5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $8, %esp
+ movl 36(%esp), %edx
+ movl 32(%esp), %ecx
+ mulxl 4(%ecx), %esi, %eax
+ mulxl (%ecx), %edi, %ebx
+ movl %edi, 4(%esp) # 4-byte Spill
+ addl %esi, %ebx
+ mulxl 8(%ecx), %ebp, %esi
+ adcl %eax, %ebp
+ mulxl 12(%ecx), %eax, %edi
+ movl %edi, (%esp) # 4-byte Spill
+ adcl %esi, %eax
+ mulxl 16(%ecx), %ecx, %edx
+ movl 28(%esp), %esi
+ movl 4(%esp), %edi # 4-byte Reload
+ movl %edi, (%esi)
+ movl %ebx, 4(%esi)
+ movl %ebp, 8(%esi)
+ movl %eax, 12(%esi)
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esi)
+ adcl $0, %edx
+ movl %edx, 20(%esi)
+ addl $8, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end65:
+ .size mcl_fp_mulUnitPre5Lbmi2, .Lfunc_end65-mcl_fp_mulUnitPre5Lbmi2
+
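+# mcl_fpDbl_mulPre5Lbmi2: schoolbook 5x5-limb multiplication producing the
+# full 10-limb product (no reduction).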
+ .globl mcl_fpDbl_mulPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre5Lbmi2,@function
+mcl_fpDbl_mulPre5Lbmi2: # @mcl_fpDbl_mulPre5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl 68(%esp), %eax
+ movl (%eax), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl 72(%esp), %eax
+ movl (%eax), %ebp
+ mulxl %ebp, %esi, %edi
+ movl %ebx, %edx
+ mulxl %ebp, %edx, %eax
+ movl %edx, 20(%esp) # 4-byte Spill
+ addl %esi, %eax
+ movl 8(%ecx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %ebp, %esi, %ebx
+ adcl %edi, %esi
+ movl 12(%ecx), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ mulxl %ebp, %edi, %ecx
+ adcl %ebx, %edi
+ movl 68(%esp), %edx
+ movl 16(%edx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ mulxl %ebp, %ebp, %edx
+ adcl %ecx, %ebp
+ movl 64(%esp), %ecx
+ movl 20(%esp), %ebx # 4-byte Reload
+ movl %ebx, (%ecx)
+ adcl $0, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx
+ movl 4(%ecx), %ebx
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %ebx, %ecx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ addl %eax, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %ebx, %ecx, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %ebx, %esi, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl 28(%esp), %edx # 4-byte Reload
+ mulxl %ebx, %edi, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl %ebp, %edi
+ movl 24(%esp), %edx # 4-byte Reload
+ mulxl %ebx, %eax, %edx
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebx
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ adcl %edx, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl 36(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%eax)
+ movl 68(%esp), %ebx
+ movl (%ebx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl 8(%eax), %eax
+ mulxl %eax, %edx, %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ addl %ecx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 4(%ebx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %eax, %edx, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl %esi, %edx
+ movl %edx, %ebp
+ movl 8(%ebx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl %edi, %ecx
+ movl 12(%ebx), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ mulxl %eax, %esi, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl 16(%ebx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ mulxl %eax, %edi, %edx
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ addl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ adcl (%esp), %edi # 4-byte Folded Reload
+ adcl %edx, %ebx
+ movl 64(%esp), %eax
+ movl 20(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%eax)
+ movl 72(%esp), %eax
+ movl 12(%eax), %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ addl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebp, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %ecx, %ebp
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl 28(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl %edi, %edx
+ movl %edx, %esi
+ movl 24(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edi, %edx
+ adcl %ebx, %edi
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ adcl %edx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%eax)
+ movl 72(%esp), %eax
+ movl 16(%eax), %edx
+ movl 68(%esp), %eax
+ mulxl (%eax), %esi, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ addl %ebp, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ mulxl 4(%eax), %ebx, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl %ecx, %ebx
+ mulxl 8(%eax), %esi, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ mulxl 12(%eax), %ecx, %ebp
+ adcl %edi, %ecx
+ mulxl 16(%eax), %edi, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl %ebp, %edi
+ movl 64(%esp), %ebp
+ movl 40(%esp), %edx # 4-byte Reload
+ movl %edx, 16(%ebp)
+ movl %ebx, 20(%ebp)
+ movl %esi, 24(%ebp)
+ movl %ecx, 28(%ebp)
+ movl %edi, 32(%ebp)
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ebp)
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end66:
+ .size mcl_fpDbl_mulPre5Lbmi2, .Lfunc_end66-mcl_fpDbl_mulPre5Lbmi2
+
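+# mcl_fpDbl_sqrPre5Lbmi2: squaring of a 5-limb value producing the full
+# 10-limb result (no reduction).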
+ .globl mcl_fpDbl_sqrPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre5Lbmi2,@function
+mcl_fpDbl_sqrPre5Lbmi2: # @mcl_fpDbl_sqrPre5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %ecx
+ movl (%ecx), %edi
+ movl 4(%ecx), %esi
+ movl %esi, %edx
+ mulxl %edi, %ebp, %ebx
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl %edi, %edx, %eax
+ movl %edx, 16(%esp) # 4-byte Spill
+ addl %ebp, %eax
+ movl 8(%ecx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %edi, %ebp, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl %ebx, %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 12(%ecx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ mulxl %edi, %ecx, %ebx
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 64(%esp), %edx
+ movl 16(%edx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %edi, %edi, %edx
+ adcl %ebx, %edi
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl 60(%esp), %ebp
+ movl %ebx, (%ebp)
+ adcl $0, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ addl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl %esi, %ebx, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %esi, %ebp, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl %ecx, %ebp
+ movl 20(%esp), %edx # 4-byte Reload
+ mulxl %esi, %ecx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl %edi, %ecx
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %esi, %edi, %edx
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ adcl %edx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %eax
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%eax)
+ movl 64(%esp), %eax
+ movl (%eax), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ mulxl %esi, %edx, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ addl %ebx, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl 4(%eax), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ mulxl %esi, %ebx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl %esi, %edx
+ mulxl %esi, %ebp, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ adcl %ecx, %ebp
+ movl 64(%esp), %ecx
+ movl 12(%ecx), %esi
+ movl %esi, %edx
+ mulxl %eax, %eax, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl %edi, %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl 36(%esp), %ecx, %edx # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl 16(%esp), %ebx # 4-byte Folded Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ adcl %edx, %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ mulxl %esi, %edx, %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ addl %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 20(%esp), %edx # 4-byte Reload
+ mulxl %esi, %edx, %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl %ebp, %edx
+ movl %edx, %edi
+ movl 60(%esp), %eax
+ movl 28(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%eax)
+ movl 64(%esp), %eax
+ movl 8(%eax), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %esi, %ebx, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, %edx
+ mulxl %esi, %ebp, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl %ecx, %ebp
+ movl 16(%eax), %ecx
+ movl %ecx, %edx
+ mulxl %esi, %esi, %edx
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ adcl %edx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 60(%esp), %edx
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edx)
+ movl %ecx, %edx
+ movl 64(%esp), %eax
+ mulxl (%eax), %edx, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ movl 64(%esp), %eax
+ mulxl 4(%eax), %edi, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %ebx, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl %ecx, %edx
+ mulxl 12(%eax), %ebp, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl %esi, %ebp
+ movl %ecx, %edx
+ mulxl %ecx, %edx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 28(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl 60(%esp), %esi
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%esi)
+ movl %edi, 20(%esi)
+ movl %ebx, 24(%esi)
+ movl %ebp, 28(%esi)
+ movl %edx, 32(%esi)
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end67:
+ .size mcl_fpDbl_sqrPre5Lbmi2, .Lfunc_end67-mcl_fpDbl_sqrPre5Lbmi2
+
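+# mcl_fp_mont5Lbmi2: Montgomery multiplication for 5 limbs; each partial
+# product is interleaved with a reduction step using n' = -p^-1 mod 2^32 read
+# from -4(p) (CIOS-style interleaving, assumed from the code structure).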
+ .globl mcl_fp_mont5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont5Lbmi2,@function
+mcl_fp_mont5Lbmi2: # @mcl_fp_mont5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $84, %esp
+ movl 108(%esp), %eax
+ movl 16(%eax), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl (%ecx), %ecx
+ movl 12(%eax), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 8(%eax), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl (%eax), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 4(%eax), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ mulxl %ecx, %edx, %eax
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl %ecx, %edx, %esi
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl %ecx, %edx, %edi
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ mulxl %ecx, %edx, %ebp
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl %ecx, %edx, %ecx
+ movl %edx, 16(%esp) # 4-byte Spill
+ addl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 24(%esp) # 4-byte Spill
+ adcl 76(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp
+ movl -4(%ebp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ imull %eax, %edx
+ movl (%ebp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 4(%ebp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ mulxl %ecx, %ecx, %ebx
+ mulxl %eax, %esi, %edi
+ movl %esi, 12(%esp) # 4-byte Spill
+ addl %ecx, %edi
+ movl 8(%ebp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ mulxl %eax, %esi, %ecx
+ adcl %ebx, %esi
+ movl 12(%ebp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ mulxl %eax, %eax, %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ adcl %ecx, %eax
+ movl %eax, %ecx
+ movl 16(%ebp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %eax
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %eax
+ movl 12(%esp), %edx # 4-byte Reload
+ addl 16(%esp), %edx # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ andl $1, %edi
+ movl 112(%esp), %edx
+ movl 4(%edx), %edx
+ mulxl 48(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl 40(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ mulxl 44(%esp), %esi, %ebp # 4-byte Folded Reload
+ addl %eax, %ebp
+ mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 8(%esp) # 4-byte Spill
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 24(%esp), %ebp # 4-byte Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 24(%esp) # 4-byte Spill
+ adcl %ebx, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl %edi, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 76(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, %edi
+ mulxl 72(%esp), %esi, %ebx # 4-byte Folded Reload
+ adcl %eax, %esi
+ mulxl 68(%esp), %ecx, %ebp # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ mulxl 64(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl %ebp, %edx
+ movl %edx, %ebx
+ adcl $0, %eax
+ movl 28(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ movl 12(%esp), %ebp # 4-byte Reload
+ addl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %esi
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 112(%esp), %edx
+ movl 8(%edx), %edx
+ mulxl 48(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl 40(%esp), %edi, %ebx # 4-byte Folded Reload
+ mulxl 44(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ addl %edi, %ecx
+ mulxl 56(%esp), %eax, %edx # 4-byte Folded Reload
+ adcl %ebx, %eax
+ movl %eax, %edi
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, %eax
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ movl 24(%esp), %ebp # 4-byte Reload
+ addl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 32(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 76(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ mulxl 72(%esp), %ecx, %ebp # 4-byte Folded Reload
+ adcl %eax, %ecx
+ mulxl 68(%esp), %eax, %edi # 4-byte Folded Reload
+ adcl %ebp, %eax
+ mulxl 64(%esp), %ebx, %ebp # 4-byte Folded Reload
+ adcl %edi, %ebx
+ adcl $0, %ebp
+ movl 28(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ movl 12(%esp), %edi # 4-byte Reload
+ addl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 112(%esp), %edx
+ movl 12(%edx), %edx
+ mulxl 48(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl 40(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ mulxl 44(%esp), %esi, %edi # 4-byte Folded Reload
+ addl %eax, %edi
+ mulxl 56(%esp), %eax, %edx # 4-byte Folded Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl %ecx, %edx
+ movl %edx, %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 8(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl %ebx, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 76(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, %ebp
+ mulxl 72(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, %eax
+ mulxl 68(%esp), %ebx, %edi # 4-byte Folded Reload
+ adcl %ecx, %ebx
+ mulxl 64(%esp), %ecx, %esi # 4-byte Folded Reload
+ adcl %edi, %ecx
+ adcl $0, %esi
+ movl 28(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ movl 12(%esp), %edi # 4-byte Reload
+ addl 8(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 112(%esp), %edx
+ movl 16(%edx), %edx
+ mulxl 40(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ mulxl 44(%esp), %eax, %edi # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ addl %ebp, %edi
+ mulxl 48(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 52(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %ebp, 32(%esp) # 4-byte Spill
+ mulxl 56(%esp), %ebp, %edx # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebp
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ movl 44(%esp), %eax # 4-byte Reload
+ addl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl %ebx, 52(%esp) # 4-byte Folded Spill
+ adcl %ecx, 56(%esp) # 4-byte Folded Spill
+ adcl %esi, %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ movl 60(%esp), %edx # 4-byte Reload
+ imull %eax, %edx
+ mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ mulxl 80(%esp), %ebp, %ecx # 4-byte Folded Reload
+ addl %eax, %ebp
+ mulxl 72(%esp), %edi, %eax # 4-byte Folded Reload
+ adcl %ecx, %edi
+ movl %edx, %ecx
+ mulxl 68(%esp), %esi, %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl %eax, %esi
+ movl %ecx, %edx
+ mulxl 64(%esp), %edx, %ecx # 4-byte Folded Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %ecx
+ andl $1, %ebx
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 44(%esp), %eax # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %ebx
+ subl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl %edi, %eax
+ sbbl 80(%esp), %eax # 4-byte Folded Reload
+ movl %esi, %ebp
+ sbbl 72(%esp), %ebp # 4-byte Folded Reload
+ sbbl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ sbbl 64(%esp), %edx # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB68_2
+# BB#1:
+ movl %eax, %edi
+.LBB68_2:
+ testb %bl, %bl
+ movl 44(%esp), %ebx # 4-byte Reload
+ jne .LBB68_4
+# BB#3:
+ movl 76(%esp), %ebx # 4-byte Reload
+.LBB68_4:
+ movl 104(%esp), %eax
+ movl %ebx, (%eax)
+ movl %edi, 4(%eax)
+ jne .LBB68_6
+# BB#5:
+ movl %ebp, %esi
+.LBB68_6:
+ movl %esi, 8(%eax)
+ movl 60(%esp), %esi # 4-byte Reload
+ jne .LBB68_8
+# BB#7:
+ movl 80(%esp), %esi # 4-byte Reload
+.LBB68_8:
+ movl %esi, 12(%eax)
+ jne .LBB68_10
+# BB#9:
+ movl %edx, %ecx
+.LBB68_10:
+ movl %ecx, 16(%eax)
+ addl $84, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end68:
+ .size mcl_fp_mont5Lbmi2, .Lfunc_end68-mcl_fp_mont5Lbmi2
+
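+# mcl_fp_montNF5Lbmi2: Montgomery multiplication, "NF" variant; the final
+# value is selected by the sign of the trial subtraction rather than a carry
+# chain (interpretation assumed from the mcl naming convention).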
+ .globl mcl_fp_montNF5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF5Lbmi2,@function
+mcl_fp_montNF5Lbmi2: # @mcl_fp_montNF5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $68, %esp
+ movl 92(%esp), %edi
+ movl (%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 4(%edi), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx
+ movl (%ecx), %ebx
+ mulxl %ebx, %ecx, %esi
+ movl %eax, %edx
+ mulxl %ebx, %edx, %eax
+ movl %edx, 60(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ movl %eax, %ecx
+ movl 8(%edi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %ebx, %eax, %ebp
+ adcl %esi, %eax
+ movl %eax, %esi
+ movl 12(%edi), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ mulxl %ebx, %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 16(%edi), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ mulxl %ebx, %edx, %eax
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx
+ movl -4(%ebx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ movl %edi, %edx
+ imull %eax, %edx
+ movl (%ebx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ mulxl %eax, %ebp, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ addl %edi, %ebp
+ movl 4(%ebx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ mulxl %eax, %eax, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl %ecx, %eax
+ movl %eax, %edi
+ movl 8(%ebx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ mulxl %eax, %eax, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl %esi, %eax
+ movl %eax, %esi
+ movl 12(%ebx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %ebp
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl 16(%ebx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %edx
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ adcl %edx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl 4(%eax), %edx
+ mulxl 36(%esp), %ecx, %esi # 4-byte Folded Reload
+ mulxl 40(%esp), %edi, %eax # 4-byte Folded Reload
+ addl %ecx, %eax
+ mulxl 32(%esp), %ebp, %ecx # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl %esi, %ebp
+ mulxl 28(%esp), %esi, %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl (%esp), %esi # 4-byte Folded Reload
+ mulxl 24(%esp), %edx, %ecx # 4-byte Folded Reload
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %ecx
+ addl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl %edi, %edx
+ imull 44(%esp), %edx # 4-byte Folded Reload
+ mulxl 64(%esp), %ebx, %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ addl %edi, %ebx
+ mulxl 60(%esp), %edi, %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl %eax, %edi
+ mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl %ebp, %ecx
+ mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload
+ adcl %esi, %eax
+ mulxl 48(%esp), %ebx, %edx # 4-byte Folded Reload
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ movl 20(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ adcl %edx, %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl 8(%eax), %edx
+ mulxl 36(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 40(%esp), %ebp, %esi # 4-byte Folded Reload
+ addl %ecx, %esi
+ mulxl 32(%esp), %edi, %ecx # 4-byte Folded Reload
+ adcl %eax, %edi
+ mulxl 28(%esp), %ebx, %eax # 4-byte Folded Reload
+ movl %eax, (%esp) # 4-byte Spill
+ adcl %ecx, %ebx
+ mulxl 24(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %eax
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl %eax, %edx
+ imull 44(%esp), %edx # 4-byte Folded Reload
+ mulxl 64(%esp), %ebp, %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ addl %eax, %ebp
+ mulxl 60(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl %esi, %ebp
+ movl %ebp, %esi
+ mulxl 56(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ movl %ebp, %eax
+ mulxl 52(%esp), %ebp, %edi # 4-byte Folded Reload
+ adcl %ebx, %ebp
+ movl %ebp, %ebx
+ mulxl 48(%esp), %ebp, %edx # 4-byte Folded Reload
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl %edx, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl 12(%eax), %edx
+ mulxl 36(%esp), %ecx, %esi # 4-byte Folded Reload
+ mulxl 40(%esp), %ebx, %ebp # 4-byte Folded Reload
+ addl %ecx, %ebp
+ mulxl 32(%esp), %ecx, %edi # 4-byte Folded Reload
+ adcl %esi, %ecx
+ mulxl 28(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, (%esp) # 4-byte Spill
+ adcl %edi, %esi
+ mulxl 24(%esp), %edi, %eax # 4-byte Folded Reload
+ adcl (%esp), %edi # 4-byte Folded Reload
+ adcl $0, %eax
+ addl 16(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ imull 44(%esp), %edx # 4-byte Folded Reload
+ mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ addl 16(%esp), %ebx # 4-byte Folded Reload
+ mulxl 60(%esp), %ebx, %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ mulxl 56(%esp), %eax, %ebx # 4-byte Folded Reload
+ adcl %ecx, %eax
+ mulxl 52(%esp), %ecx, %ebp # 4-byte Folded Reload
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ mulxl 48(%esp), %ecx, %edx # 4-byte Folded Reload
+ adcl %edi, %ecx
+ movl 20(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 12(%esp), %edi # 4-byte Reload
+ addl %edi, 16(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl %ebx, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ adcl %edx, %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl 16(%eax), %edx
+ mulxl 36(%esp), %eax, %ebp # 4-byte Folded Reload
+ mulxl 40(%esp), %edi, %ebx # 4-byte Folded Reload
+ addl %eax, %ebx
+ mulxl 32(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ mulxl 28(%esp), %ebp, %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ mulxl 24(%esp), %edx, %esi # 4-byte Folded Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl 44(%esp), %edx # 4-byte Reload
+ imull %edi, %edx
+ mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ addl %edi, %ecx
+ mulxl 60(%esp), %edi, %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ movl %edx, %eax
+ mulxl 56(%esp), %ebx, %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ mulxl 52(%esp), %ecx, %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl %ebp, %ecx
+ movl %eax, %edx
+ mulxl 48(%esp), %ebp, %edx # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %esi
+ addl 44(%esp), %edi # 4-byte Folded Reload
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl %edx, %esi
+ movl %edi, %eax
+ subl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ sbbl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ sbbl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ sbbl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %esi, %edx
+ sbbl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ sarl $31, %edx
+ testl %edx, %edx
+ js .LBB69_2
+# BB#1:
+ movl 40(%esp), %edi # 4-byte Reload
+.LBB69_2:
+ movl 88(%esp), %edx
+ movl %edi, (%edx)
+ js .LBB69_4
+# BB#3:
+ movl 44(%esp), %ebx # 4-byte Reload
+.LBB69_4:
+ movl %ebx, 4(%edx)
+ js .LBB69_6
+# BB#5:
+ movl 56(%esp), %ecx # 4-byte Reload
+.LBB69_6:
+ movl %ecx, 8(%edx)
+ js .LBB69_8
+# BB#7:
+ movl 60(%esp), %ebp # 4-byte Reload
+.LBB69_8:
+ movl %ebp, 12(%edx)
+ js .LBB69_10
+# BB#9:
+ movl 64(%esp), %esi # 4-byte Reload
+.LBB69_10:
+ movl %esi, 16(%edx)
+ addl $68, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end69:
+ .size mcl_fp_montNF5Lbmi2, .Lfunc_end69-mcl_fp_montNF5Lbmi2
+
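+# mcl_fp_montRed5Lbmi2: Montgomery reduction of a 10-limb input modulo a
+# 5-limb prime, with the inverse word n' read from -4(p) (assumed from the mcl
+# naming convention).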
+ .globl mcl_fp_montRed5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed5Lbmi2,@function
+mcl_fp_montRed5Lbmi2: # @mcl_fp_montRed5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $68, %esp
+ movl 96(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl (%eax), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx
+ movl (%ecx), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ imull %esi, %edx
+ movl 16(%eax), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 12(%eax), %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 8(%eax), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 4(%eax), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ mulxl %esi, %esi, %eax
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl %ebx, %esi, %eax
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ mulxl %ebp, %ebp, %eax
+ mulxl %ecx, %esi, %ecx
+ mulxl %edi, %edx, %ebx
+ addl %esi, %ebx
+ adcl %ebp, %ecx
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl 44(%esp), %edx # 4-byte Folded Reload
+ movl 92(%esp), %ebp
+ adcl 4(%ebp), %ebx
+ adcl 8(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 12(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 16(%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 20(%ebp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 36(%ebp), %edx
+ movl 32(%ebp), %esi
+ movl 28(%ebp), %edi
+ movl 24(%ebp), %eax
+ adcl $0, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, %esi
+ movl %ebx, %edx
+ imull 48(%esp), %edx # 4-byte Folded Reload
+ mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ mulxl 56(%esp), %ebp, %eax # 4-byte Folded Reload
+ mulxl 52(%esp), %edi, %ecx # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ addl %ebp, %ecx
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebp
+ mulxl 64(%esp), %eax, %edi # 4-byte Folded Reload
+ movl %edi, (%esp) # 4-byte Spill
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ mulxl %eax, %edi, %edx
+ adcl (%esp), %edi # 4-byte Folded Reload
+ adcl $0, %edx
+ addl %ebx, 4(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 28(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 20(%esp) # 4-byte Folded Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %ecx, %edx
+ imull 48(%esp), %edx # 4-byte Folded Reload
+ mulxl %eax, %edi, %eax
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl 56(%esp), %edi, %eax # 4-byte Folded Reload
+ movl %eax, (%esp) # 4-byte Spill
+ mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ addl %edi, %ebp
+ mulxl 60(%esp), %ebx, %eax # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ mulxl 64(%esp), %edi, %edx # 4-byte Folded Reload
+ adcl %eax, %edi
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ecx, 8(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ imull 48(%esp), %edx # 4-byte Folded Reload
+ mulxl 40(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 52(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ mulxl 64(%esp), %edx, %ecx # 4-byte Folded Reload
+ adcl %eax, %edx
+ movl %edx, %eax
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ebp, 12(%esp) # 4-byte Folded Spill
+ adcl %ebx, %esi
+ adcl %edi, 24(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 16(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ movl 48(%esp), %edx # 4-byte Reload
+ imull %esi, %edx
+ mulxl 52(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 56(%esp), %edi, %eax # 4-byte Folded Reload
+ addl %ecx, %edi
+ movl %edx, %ebp
+ mulxl 60(%esp), %ecx, %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %eax, %ecx
+ movl %ebp, %edx
+ mulxl 64(%esp), %eax, %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, %edx
+ mulxl 40(%esp), %ebp, %edx # 4-byte Folded Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edx
+ addl %esi, 48(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 48(%esp) # 4-byte Spill
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, %esi
+ adcl $0, %esi
+ movl %edi, %ebx
+ subl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ sbbl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ sbbl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebx
+ sbbl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ sbbl 40(%esp), %ebp # 4-byte Folded Reload
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB70_2
+# BB#1:
+ movl 56(%esp), %ecx # 4-byte Reload
+.LBB70_2:
+ movl %esi, %eax
+ testb %al, %al
+ jne .LBB70_4
+# BB#3:
+ movl 52(%esp), %edi # 4-byte Reload
+.LBB70_4:
+ movl 88(%esp), %esi
+ movl %edi, (%esi)
+ movl %ecx, 4(%esi)
+ movl 48(%esp), %eax # 4-byte Reload
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB70_6
+# BB#5:
+ movl %ebx, %ecx
+.LBB70_6:
+ movl %ecx, 8(%esi)
+ jne .LBB70_8
+# BB#7:
+ movl 64(%esp), %eax # 4-byte Reload
+.LBB70_8:
+ movl %eax, 12(%esi)
+ jne .LBB70_10
+# BB#9:
+ movl %ebp, %edx
+.LBB70_10:
+ movl %edx, 16(%esi)
+ addl $68, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end70:
+ .size mcl_fp_montRed5Lbmi2, .Lfunc_end70-mcl_fp_montRed5Lbmi2
+
+ .globl mcl_fp_addPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre5Lbmi2,@function
+mcl_fp_addPre5Lbmi2: # @mcl_fp_addPre5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 24(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %edi
+ adcl 8(%esi), %edi
+ movl 12(%esi), %ebx
+ movl 16(%esi), %esi
+ adcl 12(%eax), %ebx
+ movl 16(%eax), %eax
+ movl 20(%esp), %ebp
+ movl %ecx, (%ebp)
+ movl %edx, 4(%ebp)
+ movl %edi, 8(%ebp)
+ movl %ebx, 12(%ebp)
+ adcl %esi, %eax
+ movl %eax, 16(%ebp)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end71:
+ .size mcl_fp_addPre5Lbmi2, .Lfunc_end71-mcl_fp_addPre5Lbmi2
+
+ .globl mcl_fp_subPre5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre5Lbmi2,@function
+mcl_fp_subPre5Lbmi2: # @mcl_fp_subPre5Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %edx
+ xorl %eax, %eax
+ movl 20(%esp), %esi
+ subl (%esi), %edx
+ movl 12(%esp), %edi
+ movl %edx, (%edi)
+ movl 4(%ecx), %edx
+ sbbl 4(%esi), %edx
+ movl %edx, 4(%edi)
+ movl 8(%ecx), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, 8(%edi)
+ movl 12(%ecx), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%edi)
+ movl 16(%esi), %edx
+ movl 16(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 16(%edi)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end72:
+ .size mcl_fp_subPre5Lbmi2, .Lfunc_end72-mcl_fp_subPre5Lbmi2
+
+ .globl mcl_fp_shr1_5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_5Lbmi2,@function
+mcl_fp_shr1_5Lbmi2: # @mcl_fp_shr1_5Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %eax
+ movl 16(%eax), %ecx
+ movl 12(%eax), %edx
+ movl 8(%eax), %esi
+ movl (%eax), %edi
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %edi
+ movl 16(%esp), %ebx
+ movl %edi, (%ebx)
+ shrdl $1, %esi, %eax
+ movl %eax, 4(%ebx)
+ shrdl $1, %edx, %esi
+ movl %esi, 8(%ebx)
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%ebx)
+ shrl %ecx
+ movl %ecx, 16(%ebx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end73:
+ .size mcl_fp_shr1_5Lbmi2, .Lfunc_end73-mcl_fp_shr1_5Lbmi2
+
+ .globl mcl_fp_add5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add5Lbmi2,@function
+mcl_fp_add5Lbmi2: # @mcl_fp_add5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %ebx
+ movl (%ebx), %eax
+ movl 4(%ebx), %ecx
+ movl 24(%esp), %edi
+ addl (%edi), %eax
+ adcl 4(%edi), %ecx
+ movl 8(%ebx), %edx
+ adcl 8(%edi), %edx
+ movl 12(%edi), %esi
+ movl 16(%edi), %edi
+ adcl 12(%ebx), %esi
+ adcl 16(%ebx), %edi
+ movl 20(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %ecx, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl %esi, 12(%ebx)
+ movl %edi, 16(%ebx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 32(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %ecx
+ sbbl 8(%ebp), %edx
+ sbbl 12(%ebp), %esi
+ sbbl 16(%ebp), %edi
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB74_2
+# BB#1: # %nocarry
+ movl 20(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %ecx, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl %esi, 12(%ebx)
+ movl %edi, 16(%ebx)
+.LBB74_2: # %carry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end74:
+ .size mcl_fp_add5Lbmi2, .Lfunc_end74-mcl_fp_add5Lbmi2
+
+ .globl mcl_fp_addNF5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF5Lbmi2,@function
+mcl_fp_addNF5Lbmi2: # @mcl_fp_addNF5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %esi
+ movl (%esi), %ebx
+ movl 4(%esi), %eax
+ movl 44(%esp), %edi
+ addl (%edi), %ebx
+ adcl 4(%edi), %eax
+ movl 16(%esi), %ecx
+ movl 12(%esi), %edx
+ movl 8(%esi), %ebp
+ adcl 8(%edi), %ebp
+ adcl 12(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 16(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edi
+ movl %ebx, %esi
+ subl (%edi), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl %eax, %esi
+ sbbl 4(%edi), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ sbbl 8(%edi), %esi
+ sbbl 12(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ sbbl 16(%edi), %edx
+ movl %edx, %edi
+ sarl $31, %edi
+ testl %edi, %edi
+ js .LBB75_2
+# BB#1:
+ movl (%esp), %ebx # 4-byte Reload
+.LBB75_2:
+ movl 40(%esp), %edi
+ movl %ebx, (%edi)
+ js .LBB75_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB75_4:
+ movl %eax, 4(%edi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ js .LBB75_6
+# BB#5:
+ movl %esi, %ebp
+.LBB75_6:
+ movl %ebp, 8(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ js .LBB75_8
+# BB#7:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB75_8:
+ movl %ecx, 12(%edi)
+ js .LBB75_10
+# BB#9:
+ movl %edx, %eax
+.LBB75_10:
+ movl %eax, 16(%edi)
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end75:
+ .size mcl_fp_addNF5Lbmi2, .Lfunc_end75-mcl_fp_addNF5Lbmi2
+
+ .globl mcl_fp_sub5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub5Lbmi2,@function
+mcl_fp_sub5Lbmi2: # @mcl_fp_sub5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ xorl %ebx, %ebx
+ movl 28(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %ecx
+ movl 8(%edi), %edx
+ sbbl 8(%ebp), %edx
+ movl 12(%edi), %esi
+ sbbl 12(%ebp), %esi
+ movl 16(%edi), %edi
+ sbbl 16(%ebp), %edi
+ movl 20(%esp), %ebp
+ movl %eax, (%ebp)
+ movl %ecx, 4(%ebp)
+ movl %edx, 8(%ebp)
+ movl %esi, 12(%ebp)
+ movl %edi, 16(%ebp)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB76_2
+# BB#1: # %carry
+ movl 32(%esp), %ebx
+ addl (%ebx), %eax
+ movl %eax, (%ebp)
+ adcl 4(%ebx), %ecx
+ movl %ecx, 4(%ebp)
+ adcl 8(%ebx), %edx
+ movl %edx, 8(%ebp)
+ movl 12(%ebx), %eax
+ adcl %esi, %eax
+ movl %eax, 12(%ebp)
+ movl 16(%ebx), %eax
+ adcl %edi, %eax
+ movl %eax, 16(%ebp)
+.LBB76_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end76:
+ .size mcl_fp_sub5Lbmi2, .Lfunc_end76-mcl_fp_sub5Lbmi2
+
+ .globl mcl_fp_subNF5Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF5Lbmi2,@function
+mcl_fp_subNF5Lbmi2: # @mcl_fp_subNF5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 40(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 44(%esp), %ebx
+ subl (%ebx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ sbbl 4(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 16(%edi), %esi
+ movl 12(%edi), %eax
+ movl 8(%edi), %ecx
+ sbbl 8(%ebx), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ sbbl 12(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %esi
+ movl %esi, %ebx
+ sarl $31, %ebx
+ movl %ebx, %ebp
+ shldl $1, %esi, %ebp
+ movl 48(%esp), %edi
+ movl 4(%edi), %ecx
+ andl %ebp, %ecx
+ andl (%edi), %ebp
+ movl 16(%edi), %edx
+ andl %ebx, %edx
+ rorxl $31, %ebx, %eax
+ andl 12(%edi), %ebx
+ andl 8(%edi), %eax
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl 36(%esp), %edi
+ movl %ebp, (%edi)
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 4(%edi)
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %eax, 8(%edi)
+ movl %ebx, 12(%edi)
+ adcl %esi, %edx
+ movl %edx, 16(%edi)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end77:
+ .size mcl_fp_subNF5Lbmi2, .Lfunc_end77-mcl_fp_subNF5Lbmi2
+
+ .globl mcl_fpDbl_add5Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add5Lbmi2,@function
+mcl_fpDbl_add5Lbmi2: # @mcl_fpDbl_add5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 56(%esp), %edx
+ movl 52(%esp), %ecx
+ movl 12(%ecx), %ebx
+ movl 16(%ecx), %ebp
+ movl 8(%edx), %esi
+ movl (%edx), %edi
+ addl (%ecx), %edi
+ movl 48(%esp), %eax
+ movl %edi, (%eax)
+ movl 4(%edx), %edi
+ adcl 4(%ecx), %edi
+ adcl 8(%ecx), %esi
+ adcl 12(%edx), %ebx
+ adcl 16(%edx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl %edi, 4(%eax)
+ movl 28(%edx), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl %esi, 8(%eax)
+ movl 20(%edx), %esi
+ movl %ebx, 12(%eax)
+ movl 20(%ecx), %ebp
+ adcl %esi, %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 24(%edx), %esi
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%eax)
+ movl 24(%ecx), %ebx
+ adcl %esi, %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 28(%ecx), %edi
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl 32(%ecx), %esi
+ adcl %eax, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 36(%edx), %eax
+ movl 36(%ecx), %edx
+ adcl %eax, %edx
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %ebp, %ecx
+ movl 60(%esp), %ebp
+ subl (%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ sbbl 4(%ebp), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ sbbl 8(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl %esi, %ebx
+ movl %edx, %esi
+ sbbl 12(%ebp), %ebx
+ sbbl 16(%ebp), %edx
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB78_2
+# BB#1:
+ movl %edx, %esi
+.LBB78_2:
+ testb %al, %al
+ movl 12(%esp), %ebp # 4-byte Reload
+ jne .LBB78_4
+# BB#3:
+ movl (%esp), %ebp # 4-byte Reload
+.LBB78_4:
+ movl 48(%esp), %eax
+ movl %ebp, 20(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl 20(%esp), %edx # 4-byte Reload
+ movl 16(%esp), %edi # 4-byte Reload
+ jne .LBB78_6
+# BB#5:
+ movl 4(%esp), %edi # 4-byte Reload
+.LBB78_6:
+ movl %edi, 24(%eax)
+ jne .LBB78_8
+# BB#7:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB78_8:
+ movl %edx, 28(%eax)
+ jne .LBB78_10
+# BB#9:
+ movl %ebx, %ecx
+.LBB78_10:
+ movl %ecx, 32(%eax)
+ movl %esi, 36(%eax)
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end78:
+ .size mcl_fpDbl_add5Lbmi2, .Lfunc_end78-mcl_fpDbl_add5Lbmi2
+
+ .globl mcl_fpDbl_sub5Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub5Lbmi2,@function
+mcl_fpDbl_sub5Lbmi2: # @mcl_fpDbl_sub5Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 40(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edi
+ movl 44(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%eax), %ebx
+ sbbl 8(%edx), %ebx
+ movl 36(%esp), %ecx
+ movl %esi, (%ecx)
+ movl 12(%eax), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ecx)
+ movl 16(%eax), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ sbbl %ebx, %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 24(%edx), %esi
+ movl %edi, 16(%ecx)
+ movl 24(%eax), %ebp
+ sbbl %esi, %ebp
+ movl 28(%edx), %esi
+ movl 28(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl 32(%edx), %esi
+ movl 32(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 36(%edx), %edx
+ movl 36(%eax), %eax
+ sbbl %edx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl $0, %edx
+ sbbl $0, %edx
+ andl $1, %edx
+ movl 48(%esp), %ebx
+ jne .LBB79_1
+# BB#2:
+ xorl %eax, %eax
+ jmp .LBB79_3
+.LBB79_1:
+ movl 16(%ebx), %eax
+.LBB79_3:
+ testb %dl, %dl
+ jne .LBB79_4
+# BB#5:
+ movl $0, %edx
+ movl $0, %esi
+ jmp .LBB79_6
+.LBB79_4:
+ movl (%ebx), %esi
+ movl 4(%ebx), %edx
+.LBB79_6:
+ jne .LBB79_7
+# BB#8:
+ movl $0, %edi
+ jmp .LBB79_9
+.LBB79_7:
+ movl 12(%ebx), %edi
+.LBB79_9:
+ jne .LBB79_10
+# BB#11:
+ xorl %ebx, %ebx
+ jmp .LBB79_12
+.LBB79_10:
+ movl 8(%ebx), %ebx
+.LBB79_12:
+ addl 4(%esp), %esi # 4-byte Folded Reload
+ adcl %ebp, %edx
+ movl %esi, 20(%ecx)
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl %edx, 24(%ecx)
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 28(%ecx)
+ movl %edi, 32(%ecx)
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end79:
+ .size mcl_fpDbl_sub5Lbmi2, .Lfunc_end79-mcl_fpDbl_sub5Lbmi2
+
+ .globl mcl_fp_mulUnitPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre6Lbmi2,@function
+mcl_fp_mulUnitPre6Lbmi2: # @mcl_fp_mulUnitPre6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $12, %esp
+ movl 40(%esp), %edx
+ movl 36(%esp), %esi
+ mulxl 4(%esi), %ecx, %edi
+ mulxl (%esi), %eax, %ebx
+ movl %eax, 8(%esp) # 4-byte Spill
+ addl %ecx, %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ mulxl 8(%esi), %ebp, %eax
+ adcl %edi, %ebp
+ mulxl 12(%esi), %ecx, %edi
+ adcl %eax, %ecx
+ mulxl 16(%esi), %eax, %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ adcl %edi, %eax
+ mulxl 20(%esi), %edx, %esi
+ movl 32(%esp), %edi
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, (%edi)
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%edi)
+ movl %ebp, 8(%edi)
+ movl %ecx, 12(%edi)
+ movl %eax, 16(%edi)
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%edi)
+ adcl $0, %esi
+ movl %esi, 24(%edi)
+ addl $12, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end80:
+ .size mcl_fp_mulUnitPre6Lbmi2, .Lfunc_end80-mcl_fp_mulUnitPre6Lbmi2
+
+ .globl mcl_fpDbl_mulPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre6Lbmi2,@function
+mcl_fpDbl_mulPre6Lbmi2: # @mcl_fpDbl_mulPre6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 80(%esp), %ebp
+ movl (%ebp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 4(%ebp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl (%eax), %eax
+ mulxl %eax, %esi, %edi
+ movl %ecx, %edx
+ mulxl %eax, %edx, %ecx
+ movl %edx, 28(%esp) # 4-byte Spill
+ addl %esi, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 8(%ebp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %eax, %esi, %ebx
+ adcl %edi, %esi
+ movl 12(%ebp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %ebp, %ecx
+ mulxl %eax, %edi, %ebp
+ adcl %ebx, %edi
+ movl 16(%ecx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl %ecx, %edx
+ movl 20(%edx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %eax, %eax, %ecx
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl 76(%esp), %edx
+ movl 28(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%edx)
+ adcl $0, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %edx
+ movl 4(%edx), %ebp
+ movl 52(%esp), %edx # 4-byte Reload
+ mulxl %ebp, %edx, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ addl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %ebp, %ecx, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %ebp, %esi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %ebp, %edi, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %ebp, %ebx, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %eax, %ebx
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %ebp, %eax, %edx
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebp
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 28(%esp), %ecx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 28(%esp) # 4-byte Spill
+ adcl %edx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl 52(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%eax)
+ movl 80(%esp), %eax
+ movl (%eax), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl 8(%eax), %eax
+ mulxl %eax, %edx, %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ addl %ecx, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp
+ movl 4(%ebp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 8(%ebp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl %edi, %ecx
+ movl %ecx, %esi
+ movl 12(%ebp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %eax, %edi, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ movl 16(%ebp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 20(%ebp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ mulxl %eax, %ebp, %edx
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 16(%esp), %eax # 4-byte Reload
+ addl %eax, 52(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ adcl %edx, %ecx
+ movl 76(%esp), %eax
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%eax)
+ movl 84(%esp), %eax
+ movl 12(%eax), %eax
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ addl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %eax, %esi, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edi, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl 28(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebp, %edx
+ adcl %ecx, %ebp
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 24(%esp), %ecx # 4-byte Reload
+ addl %ecx, 52(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 80(%esp), %ecx
+ movl (%ecx), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl 16(%eax), %eax
+ mulxl %eax, %edx, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ addl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx
+ movl 4(%ecx), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl 80(%esp), %edx
+ movl 8(%edx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ mulxl %eax, %esi, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl %esi, %edi
+ movl 80(%esp), %esi
+ movl %esi, %edx
+ movl 12(%edx), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %eax, %esi, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl %ebx, %esi
+ movl 80(%esp), %edx
+ movl 16(%edx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl 80(%esp), %edx
+ movl 20(%edx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %eax, %ebp, %edx
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl 28(%esp), %edx # 4-byte Reload
+ movl %edx, 16(%eax)
+ movl 84(%esp), %eax
+ movl 20(%eax), %eax
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ addl %ecx, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %edi
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %eax, %esi, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl %ebx, %esi
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebx, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%eax)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%eax)
+ movl %ecx, 28(%eax)
+ movl %esi, 32(%eax)
+ movl %ebx, 36(%eax)
+ movl %edx, 40(%eax)
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%eax)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end81:
+ .size mcl_fpDbl_mulPre6Lbmi2, .Lfunc_end81-mcl_fpDbl_mulPre6Lbmi2
+
+ .globl mcl_fpDbl_sqrPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre6Lbmi2,@function
+mcl_fpDbl_sqrPre6Lbmi2: # @mcl_fpDbl_sqrPre6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %ebp
+ movl (%ebp), %ecx
+ movl 4(%ebp), %eax
+ movl %eax, %edx
+ mulxl %ecx, %edi, %esi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ mulxl %ecx, %ebx, %edx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 8(%ebp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ mulxl %ecx, %edi, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ movl 12(%ebp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %ecx, %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl 16(%ebp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ mulxl %ecx, %edx, %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 20(%ebp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %ecx, %ebp, %edx
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl 80(%esp), %ecx
+ movl 28(%esp), %esi # 4-byte Reload
+ movl %esi, (%ecx)
+ adcl $0, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl %ecx, 56(%esp) # 4-byte Folded Spill
+ movl %eax, %edx
+ mulxl %eax, %esi, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl %ebx, %ecx
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebx, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edi, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl %ebp, %edi
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %eax
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %ebp
+ sbbl %edx, %edx
+ andl $1, %edx
+ addl 52(%esp), %esi # 4-byte Folded Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl %eax, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl 56(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%eax)
+ movl 84(%esp), %eax
+ movl (%eax), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 8(%eax), %ebp
+ mulxl %ebp, %edx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ addl %esi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl 4(%eax), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ mulxl %ebp, %edx, %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl %ecx, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ mulxl %ebp, %ecx, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl %ebx, %ecx
+ movl %eax, %esi
+ movl 12(%esi), %eax
+ movl %eax, %edx
+ mulxl %ebp, %ebx, %edx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ movl 16(%esi), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %ebp, %ebx, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl 20(%esi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %ebp, %esi, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ movl 20(%esp), %edx # 4-byte Reload
+ addl %edx, 56(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ addl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %eax, %edx
+ mulxl %eax, %ecx, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl %ebx, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebx, %edx
+ adcl %ebp, %ebx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 20(%esp), %eax # 4-byte Reload
+ addl %eax, 48(%esp) # 4-byte Folded Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl %edx, %ecx
+ movl 80(%esp), %eax
+ movl 44(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%eax)
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%eax)
+ movl 84(%esp), %esi
+ movl (%esi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 16(%esi), %ebp
+ mulxl %ebp, %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ addl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 4(%esi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ mulxl %ebp, %eax, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl %edi, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 8(%esi), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %ebp, %eax, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 12(%esi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %ebp, %edi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ebp, %edx
+ mulxl %ebp, %eax, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl %ebx, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 20(%esi), %ebx
+ movl %ebx, %edx
+ mulxl %ebp, %edx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl %edx, %ecx
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ movl 8(%esp), %esi # 4-byte Reload
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl %edx, 56(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl %edx, 52(%esp) # 4-byte Folded Spill
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ adcl %eax, %ebp
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %ebx, %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ addl %esi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %ebx, %edx, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %eax
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %ebx, %esi, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl 32(%esp), %edx # 4-byte Reload
+ mulxl %ebx, %edi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, %edx
+ mulxl %ebx, %ebx, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl 80(%esp), %eax
+ movl 48(%esp), %edx # 4-byte Reload
+ movl %edx, 16(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ movl %edx, 20(%eax)
+ movl 52(%esp), %edx # 4-byte Reload
+ movl %edx, 24(%eax)
+ movl %esi, 28(%eax)
+ movl %edi, 32(%eax)
+ movl %ecx, 36(%eax)
+ movl %ebx, 40(%eax)
+ adcl 56(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%eax)
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end82:
+ .size mcl_fpDbl_sqrPre6Lbmi2, .Lfunc_end82-mcl_fpDbl_sqrPre6Lbmi2
+
+ .globl mcl_fp_mont6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont6Lbmi2,@function
+mcl_fp_mont6Lbmi2: # @mcl_fp_mont6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 124(%esp), %eax
+ movl 20(%eax), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx
+ movl (%ecx), %ecx
+ mulxl %ecx, %edx, %ebp
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 16(%eax), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ mulxl %ecx, %edx, %edi
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 12(%eax), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ mulxl %ecx, %edx, %esi
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl (%eax), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ mulxl %ecx, %eax, %edx
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl %ecx, %ebx, %edx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ addl 80(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl 8(%eax), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ mulxl %ecx, %ecx, %eax
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 92(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 96(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 132(%esp), %edi
+ movl -4(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ imull %eax, %edx
+ movl (%edi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 4(%edi), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ mulxl %esi, %esi, %ebp
+ mulxl %eax, %ecx, %eax
+ movl %ecx, 12(%esp) # 4-byte Spill
+ addl %esi, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %esi
+ adcl %ebp, %ecx
+ movl 12(%edi), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %eax
+ adcl %esi, %ebx
+ movl 16(%edi), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ mulxl %esi, %esi, %ebp
+ adcl %eax, %esi
+ movl 20(%edi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ mulxl %eax, %edi, %eax
+ adcl %ebp, %edi
+ adcl $0, %eax
+ movl 12(%esp), %edx # 4-byte Reload
+ addl 16(%esp), %edx # 4-byte Folded Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl %edx, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 128(%esp), %edx
+ movl 4(%edx), %edx
+ mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload
+ mulxl 52(%esp), %ebx, %ebp # 4-byte Folded Reload
+ addl %eax, %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ mulxl 56(%esp), %esi, %ebp # 4-byte Folded Reload
+ adcl %ecx, %esi
+ mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %ebp, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 20(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 28(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl %edi, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, %edi
+ mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, %eax
+ mulxl 84(%esp), %esi, %ebx # 4-byte Folded Reload
+ adcl %ecx, %esi
+ mulxl 80(%esp), %ecx, %ebp # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ movl %ecx, %ebx
+ mulxl 76(%esp), %ecx, %edx # 4-byte Folded Reload
+ adcl %ebp, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, %ecx
+ movl 36(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ movl 12(%esp), %ebp # 4-byte Reload
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 128(%esp), %edx
+ movl 8(%edx), %edx
+ mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ mulxl 48(%esp), %ebx, %edi # 4-byte Folded Reload
+ mulxl 52(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ addl %ebx, %esi
+ mulxl 56(%esp), %ecx, %ebp # 4-byte Folded Reload
+ adcl %edi, %ecx
+ mulxl 68(%esp), %edi, %eax # 4-byte Folded Reload
+ adcl %ebp, %edi
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ movl 4(%esp), %ebx # 4-byte Reload
+ addl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 92(%esp), %ebp, %esi # 4-byte Folded Reload
+ movl %ebp, 36(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ecx, %esi
+ movl %esi, %ecx
+ mulxl 80(%esp), %esi, %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl %eax, %esi
+ mulxl 76(%esp), %ebp, %eax # 4-byte Folded Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ addl %ebx, 36(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 28(%esp) # 4-byte Folded Spill
+ movl 16(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 24(%esp) # 4-byte Folded Spill
+ adcl %edi, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 128(%esp), %edx
+ movl 12(%edx), %edx
+ mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ mulxl 72(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ mulxl 48(%esp), %eax, %ebx # 4-byte Folded Reload
+ mulxl 52(%esp), %ecx, %edi # 4-byte Folded Reload
+ addl %eax, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ mulxl 56(%esp), %eax, %edi # 4-byte Folded Reload
+ adcl %ebx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ mulxl 68(%esp), %ebx, %eax # 4-byte Folded Reload
+ adcl %edi, %ebx
+ adcl %esi, %eax
+ movl %eax, %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 16(%esp), %edi # 4-byte Reload
+ adcl %edi, 20(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ adcl %ebp, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, %ebx
+ mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, %edi
+ mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ecx, %esi
+ movl %esi, %ecx
+ mulxl 80(%esp), %esi, %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ adcl %eax, %esi
+ movl %esi, %ebp
+ mulxl 76(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ movl 16(%esp), %esi # 4-byte Reload
+ addl 4(%esp), %esi # 4-byte Folded Reload
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %esi # 4-byte Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 128(%esp), %edx
+ movl 16(%edx), %edx
+ mulxl 64(%esp), %eax, %ebp # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ mulxl 48(%esp), %ebx, %eax # 4-byte Folded Reload
+ mulxl 52(%esp), %ecx, %edi # 4-byte Folded Reload
+ addl %ebx, %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ mulxl 56(%esp), %edi, %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ adcl %eax, %edi
+ movl %edi, %ebx
+ mulxl 68(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %edi
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, %edx
+ adcl $0, %edx
+ addl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 20(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 28(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 4(%esp), %ebx # 4-byte Reload
+ adcl %esi, %ebx
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ mulxl 88(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ mulxl 84(%esp), %edi, %eax # 4-byte Folded Reload
+ adcl %ecx, %edi
+ mulxl 80(%esp), %esi, %ebp # 4-byte Folded Reload
+ adcl %eax, %esi
+ mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %ebp, %ecx
+ adcl $0, %eax
+ movl %eax, %ebp
+ movl 40(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ movl 12(%esp), %eax # 4-byte Reload
+ addl 8(%esp), %eax # 4-byte Folded Reload
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl %ebx, %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 128(%esp), %edx
+ movl 20(%edx), %edx
+ mulxl 48(%esp), %eax, %esi # 4-byte Folded Reload
+ mulxl 52(%esp), %ecx, %ebp # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ addl %eax, %ebp
+ mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ mulxl 56(%esp), %ebx, %ecx # 4-byte Folded Reload
+ adcl %esi, %ebx
+ mulxl 72(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ mulxl 68(%esp), %esi, %edx # 4-byte Folded Reload
+ adcl %ecx, %esi
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %ecx
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ movl 52(%esp), %edi # 4-byte Reload
+ addl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ imull %edi, %edx
+ mulxl 92(%esp), %eax, %edi # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload
+ addl %edi, %ecx
+ mulxl 88(%esp), %edi, %ebx # 4-byte Folded Reload
+ adcl %esi, %edi
+ movl %edx, %esi
+ mulxl 84(%esp), %ebp, %eax # 4-byte Folded Reload
+ adcl %ebx, %ebp
+ movl %esi, %edx
+ mulxl 80(%esp), %ebx, %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl %eax, %ebx
+ movl %esi, %edx
+ mulxl 76(%esp), %esi, %edx # 4-byte Folded Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %edx
+ andl $1, 72(%esp) # 4-byte Folded Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 52(%esp), %eax # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ subl 92(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl %edi, %ecx
+ sbbl 96(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, %edi
+ sbbl 88(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl %esi, %edi
+ sbbl 84(%esp), %ebx # 4-byte Folded Reload
+ sbbl 80(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sbbl 76(%esp), %esi # 4-byte Folded Reload
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB83_2
+# BB#1:
+ movl %ecx, 68(%esp) # 4-byte Spill
+.LBB83_2:
+ testb %al, %al
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB83_4
+# BB#3:
+ movl 72(%esp), %ecx # 4-byte Reload
+.LBB83_4:
+ movl 120(%esp), %eax
+ movl %ecx, (%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%eax)
+ jne .LBB83_6
+# BB#5:
+ movl 92(%esp), %ebp # 4-byte Reload
+.LBB83_6:
+ movl %ebp, 8(%eax)
+ movl 60(%esp), %ecx # 4-byte Reload
+ jne .LBB83_8
+# BB#7:
+ movl %ebx, %ecx
+.LBB83_8:
+ movl %ecx, 12(%eax)
+ jne .LBB83_10
+# BB#9:
+ movl 96(%esp), %edi # 4-byte Reload
+.LBB83_10:
+ movl %edi, 16(%eax)
+ jne .LBB83_12
+# BB#11:
+ movl %esi, %edx
+.LBB83_12:
+ movl %edx, 20(%eax)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end83:
+ .size mcl_fp_mont6Lbmi2, .Lfunc_end83-mcl_fp_mont6Lbmi2
+
+ .globl mcl_fp_montNF6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF6Lbmi2,@function
+mcl_fp_montNF6Lbmi2: # @mcl_fp_montNF6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $84, %esp
+ movl 108(%esp), %ebx
+ movl (%ebx), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 4(%ebx), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl (%eax), %eax
+ mulxl %eax, %ecx, %esi
+ movl %edi, %edx
+ mulxl %eax, %edx, %ebp
+ movl %edx, 76(%esp) # 4-byte Spill
+ addl %ecx, %ebp
+ movl 8(%ebx), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %edi
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ movl 12(%ebx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl %edi, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 16(%ebx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %edi
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 20(%ebx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %eax
+ adcl %edi, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 116(%esp), %ebx
+ movl -4(%ebx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ movl %edi, %edx
+ imull %eax, %edx
+ movl (%ebx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ addl %edi, %ecx
+ movl 4(%ebx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ mulxl %eax, %edi, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl %ebp, %edi
+ movl 8(%ebx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ mulxl %eax, %eax, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl %esi, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 12(%ebx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ mulxl %eax, %esi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl 16(%ebx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl 20(%ebx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ mulxl %eax, %ebp, %eax
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 12(%esp), %edi # 4-byte Reload
+ adcl %edi, 24(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 4(%eax), %edx
+ mulxl 48(%esp), %ecx, %esi # 4-byte Folded Reload
+ mulxl 52(%esp), %ebp, %eax # 4-byte Folded Reload
+ addl %ecx, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ mulxl 44(%esp), %ecx, %edi # 4-byte Folded Reload
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ mulxl 40(%esp), %eax, %ebx # 4-byte Folded Reload
+ adcl %edi, %eax
+ mulxl 36(%esp), %ecx, %edi # 4-byte Folded Reload
+ movl %edi, (%esp) # 4-byte Spill
+ adcl %ebx, %ecx
+ movl %ecx, %edi
+ mulxl 32(%esp), %ebx, %ecx # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ecx
+ movl %ecx, %edx
+ addl 20(%esp), %ebp # 4-byte Folded Reload
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ movl %esi, %edx
+ imull 56(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ebp, %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ addl %esi, %ebp
+ mulxl 76(%esp), %ebp, %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl %ecx, %ebp
+ movl %ebp, %esi
+ mulxl 72(%esp), %ebp, %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %ecx
+ mulxl 68(%esp), %ebp, %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl %eax, %ebp
+ movl %ebp, %eax
+ mulxl 64(%esp), %ebp, %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ mulxl 60(%esp), %edi, %edx # 4-byte Folded Reload
+ adcl %ebx, %edi
+ movl %edi, %ebx
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ adcl %edx, %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 8(%eax), %edx
+ mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload
+ mulxl 52(%esp), %esi, %edi # 4-byte Folded Reload
+ movl %esi, (%esp) # 4-byte Spill
+ addl %eax, %edi
+ mulxl 44(%esp), %eax, %esi # 4-byte Folded Reload
+ adcl %ecx, %eax
+ movl %eax, %ecx
+ mulxl 40(%esp), %eax, %ebx # 4-byte Folded Reload
+ adcl %esi, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 36(%esp), %eax, %ebp # 4-byte Folded Reload
+ adcl %ebx, %eax
+ movl %eax, %esi
+ mulxl 32(%esp), %ebx, %eax # 4-byte Folded Reload
+ adcl %ebp, %ebx
+ adcl $0, %eax
+ movl %eax, %edx
+ movl (%esp), %ebp # 4-byte Reload
+ addl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %esi, (%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ movl %ebp, %eax
+ imull 56(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ebp, %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ addl %eax, %ebp
+ mulxl 76(%esp), %ebp, %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ movl %ebp, %edi
+ mulxl 72(%esp), %ebp, %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %ecx
+ mulxl 68(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %eax
+ mulxl 64(%esp), %ebp, %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ mulxl 60(%esp), %ebp, %edx # 4-byte Folded Reload
+ adcl %ebx, %ebp
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl %eax, 20(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl %edx, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 12(%eax), %edx
+ mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload
+ mulxl 52(%esp), %ebp, %esi # 4-byte Folded Reload
+ addl %eax, %esi
+ mulxl 44(%esp), %eax, %edi # 4-byte Folded Reload
+ adcl %ecx, %eax
+ mulxl 40(%esp), %ecx, %ebx # 4-byte Folded Reload
+ adcl %edi, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ mulxl 36(%esp), %ecx, %edi # 4-byte Folded Reload
+ movl %edi, (%esp) # 4-byte Spill
+ adcl %ebx, %ecx
+ movl %ecx, %edi
+ mulxl 32(%esp), %ebx, %ecx # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ecx
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl %ebp, %ecx
+ movl %ecx, %edx
+ imull 56(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ addl %ecx, %ebp
+ mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ mulxl 72(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ mulxl 68(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ mulxl 64(%esp), %esi, %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ mulxl 60(%esp), %esi, %edx # 4-byte Folded Reload
+ adcl %ebx, %esi
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl %eax, 20(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ adcl %edx, %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 16(%eax), %edx
+ mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload
+ mulxl 52(%esp), %esi, %edi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ addl %eax, %edi
+ mulxl 44(%esp), %eax, %esi # 4-byte Folded Reload
+ adcl %ecx, %eax
+ mulxl 40(%esp), %ecx, %ebx # 4-byte Folded Reload
+ adcl %esi, %ecx
+ mulxl 36(%esp), %esi, %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ mulxl 32(%esp), %ebx, %edx # 4-byte Folded Reload
+ adcl %ebp, %ebx
+ adcl $0, %edx
+ movl 24(%esp), %ebp # 4-byte Reload
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ imull 56(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ mulxl 76(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ mulxl 72(%esp), %edi, %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ mulxl 68(%esp), %eax, %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl %ecx, %eax
+ mulxl 64(%esp), %ecx, %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl %ecx, %edi
+ mulxl 60(%esp), %ecx, %edx # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 24(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ adcl %edx, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 20(%eax), %edx
+ mulxl 48(%esp), %ebx, %eax # 4-byte Folded Reload
+ mulxl 52(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ addl %ebx, %esi
+ mulxl 44(%esp), %ebx, %ebp # 4-byte Folded Reload
+ adcl %eax, %ebx
+ mulxl 40(%esp), %eax, %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ movl %eax, %ebp
+ mulxl 36(%esp), %eax, %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 32(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ movl 52(%esp), %edi # 4-byte Reload
+ addl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl %ecx, 48(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl 56(%esp), %edx # 4-byte Reload
+ movl 52(%esp), %ebp # 4-byte Reload
+ imull %ebp, %edx
+ mulxl 80(%esp), %ecx, %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ addl %ebp, %ecx
+ mulxl 76(%esp), %ebp, %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl %esi, %ebp
+ mulxl 72(%esp), %ecx, %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl %ebx, %ecx
+ movl %edx, %ebx
+ mulxl 68(%esp), %esi, %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %ebx, %edx
+ mulxl 64(%esp), %edi, %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, %edx
+ mulxl 60(%esp), %ebx, %edx # 4-byte Folded Reload
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %eax
+ addl 56(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl %edx, %eax
+ movl %ebp, %edx
+ subl 80(%esp), %edx # 4-byte Folded Reload
+ sbbl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ movl %ebx, %esi
+ sbbl 72(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 76(%esp) # 4-byte Spill
+ sbbl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl %esi, %ebx
+ sbbl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %eax, %edi
+ sbbl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %ebp
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ js .LBB84_2
+# BB#1:
+ movl %edx, 56(%esp) # 4-byte Spill
+.LBB84_2:
+ movl 104(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, (%ebp)
+ movl 44(%esp), %ecx # 4-byte Reload
+ js .LBB84_4
+# BB#3:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB84_4:
+ movl %ecx, 4(%ebp)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ js .LBB84_6
+# BB#5:
+ movl 76(%esp), %edx # 4-byte Reload
+.LBB84_6:
+ movl %edx, 8(%ebp)
+ js .LBB84_8
+# BB#7:
+ movl 80(%esp), %ecx # 4-byte Reload
+.LBB84_8:
+ movl %ecx, 12(%ebp)
+ js .LBB84_10
+# BB#9:
+ movl %ebx, %esi
+.LBB84_10:
+ movl %esi, 16(%ebp)
+ js .LBB84_12
+# BB#11:
+ movl %edi, %eax
+.LBB84_12:
+ movl %eax, 20(%ebp)
+ addl $84, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end84:
+ .size mcl_fp_montNF6Lbmi2, .Lfunc_end84-mcl_fp_montNF6Lbmi2
+
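+# Montgomery reduction, 6 x 32-bit limbs, BMI2 mulx variant: folds the
+# double-width (12-limb) input pointed to by the second stack argument modulo
+# the prime pointed to by the third, using the inverse word read from -4 off
+# the modulus pointer, and stores the 6-limb result via the first argument.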
+ .globl mcl_fp_montRed6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed6Lbmi2,@function
+mcl_fp_montRed6Lbmi2: # @mcl_fp_montRed6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 116(%esp), %edi
+ movl -4(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl (%edi), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl (%ecx), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ imull %eax, %edx
+ movl 20(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %eax
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 16(%edi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %eax
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 12(%edi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %eax
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 4(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl %esi, %ecx, %eax
+ movl %ecx, 48(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ movl %eax, %ebp
+ movl 8(%edi), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ mulxl %esi, %eax, %edx
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 84(%esp), %ecx # 4-byte Folded Reload
+ movl 112(%esp), %ecx
+ adcl 4(%ecx), %ebp
+ adcl 8(%ecx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 12(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ adcl 16(%ecx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 20(%ecx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 24(%ecx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl 40(%ecx), %esi
+ movl 36(%ecx), %edi
+ movl 32(%ecx), %ebx
+ movl 28(%ecx), %eax
+ adcl $0, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ movl %ebx, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 68(%esp), %esi, %ebp # 4-byte Folded Reload
+ movl %esi, 8(%esp) # 4-byte Spill
+ addl %ecx, %ebp
+ mulxl 64(%esp), %edi, %ecx # 4-byte Folded Reload
+ adcl %eax, %edi
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ mulxl 80(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ movl 52(%esp), %esi # 4-byte Reload
+ mulxl %esi, %edx, %eax
+ adcl (%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ addl %ebx, 8(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl %edi, 24(%esp) # 4-byte Folded Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ebp, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl %esi, %ecx, %eax
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ mulxl 64(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ mulxl 72(%esp), %edi, %eax # 4-byte Folded Reload
+ mulxl 68(%esp), %ecx, %ebx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ addl %edi, %ebx
+ adcl %esi, %eax
+ movl %eax, %esi
+ mulxl 76(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ mulxl 80(%esp), %edi, %edx # 4-byte Folded Reload
+ adcl %eax, %edi
+ movl %edi, %eax
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %edi
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ebp, 8(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ebx, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ mulxl 64(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 68(%esp), %edi, %ebp # 4-byte Folded Reload
+ addl %ecx, %ebp
+ adcl %esi, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl 76(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %esi
+ mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ebx, %edi
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %edi # 4-byte Reload
+ adcl %edi, 32(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ebp, %edx
+ imull 60(%esp), %edx # 4-byte Folded Reload
+ mulxl 52(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ mulxl 64(%esp), %edi, %esi # 4-byte Folded Reload
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ mulxl 68(%esp), %ebx, %ecx # 4-byte Folded Reload
+ addl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl %edi, %eax
+ movl %eax, %edi
+ mulxl 76(%esp), %eax, %ecx # 4-byte Folded Reload
+ adcl %esi, %eax
+ movl %eax, %esi
+ mulxl 80(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl %ecx, %edx
+ movl %edx, %ecx
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ebp, %ebx
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ imull %ebx, %edx
+ mulxl 68(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ mulxl 72(%esp), %eax, %edi # 4-byte Folded Reload
+ addl %ecx, %eax
+ mulxl 64(%esp), %ebp, %ecx # 4-byte Folded Reload
+ adcl %edi, %ebp
+ movl %edx, %edi
+ mulxl 76(%esp), %esi, %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl %ecx, %esi
+ movl %edi, %edx
+ mulxl 80(%esp), %ebx, %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, %edx
+ mulxl 52(%esp), %ecx, %edi # 4-byte Folded Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %edi
+ movl 60(%esp), %edx # 4-byte Reload
+ addl 40(%esp), %edx # 4-byte Folded Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ subl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ sbbl 72(%esp), %eax # 4-byte Folded Reload
+ movl %esi, %ebp
+ sbbl 64(%esp), %ebp # 4-byte Folded Reload
+ sbbl 76(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ sbbl 80(%esp), %ebx # 4-byte Folded Reload
+ sbbl 52(%esp), %edi # 4-byte Folded Reload
+ sbbl $0, %edx
+ andl $1, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ jne .LBB85_2
+# BB#1:
+ movl %eax, 60(%esp) # 4-byte Spill
+.LBB85_2:
+ movl 84(%esp), %eax # 4-byte Reload
+ testb %al, %al
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB85_4
+# BB#3:
+ movl 68(%esp), %ecx # 4-byte Reload
+.LBB85_4:
+ movl 108(%esp), %eax
+ movl %ecx, (%eax)
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB85_6
+# BB#5:
+ movl %ebp, %esi
+.LBB85_6:
+ movl %esi, 8(%eax)
+ jne .LBB85_8
+# BB#7:
+ movl 76(%esp), %ecx # 4-byte Reload
+.LBB85_8:
+ movl %ecx, 12(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB85_10
+# BB#9:
+ movl %ebx, %ecx
+.LBB85_10:
+ movl %ecx, 16(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB85_12
+# BB#11:
+ movl %edi, %ecx
+.LBB85_12:
+ movl %ecx, 20(%eax)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end85:
+ .size mcl_fp_montRed6Lbmi2, .Lfunc_end85-mcl_fp_montRed6Lbmi2
+
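+# 6-limb addition without modular reduction: z = x + y; the final carry is
+# returned in %eax.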
+ .globl mcl_fp_addPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre6Lbmi2,@function
+mcl_fp_addPre6Lbmi2: # @mcl_fp_addPre6Lbmi2
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 12(%esp), %edx
+ addl (%edx), %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 4(%eax), %ecx
+ adcl 4(%edx), %ecx
+ movl %ecx, 4(%esi)
+ movl 8(%eax), %ecx
+ adcl 8(%edx), %ecx
+ movl %ecx, 8(%esi)
+ movl 12(%edx), %ecx
+ adcl 12(%eax), %ecx
+ movl %ecx, 12(%esi)
+ movl 16(%edx), %ecx
+ adcl 16(%eax), %ecx
+ movl %ecx, 16(%esi)
+ movl 20(%eax), %eax
+ movl 20(%edx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 20(%esi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end86:
+ .size mcl_fp_addPre6Lbmi2, .Lfunc_end86-mcl_fp_addPre6Lbmi2
+
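+# 6-limb subtraction without modular reduction: z = x - y; the final borrow is
+# returned in %eax.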
+ .globl mcl_fp_subPre6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre6Lbmi2,@function
+mcl_fp_subPre6Lbmi2: # @mcl_fp_subPre6Lbmi2
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %edx
+ xorl %eax, %eax
+ movl 20(%esp), %esi
+ subl (%esi), %edx
+ movl 12(%esp), %edi
+ movl %edx, (%edi)
+ movl 4(%ecx), %edx
+ sbbl 4(%esi), %edx
+ movl %edx, 4(%edi)
+ movl 8(%ecx), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, 8(%edi)
+ movl 12(%ecx), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%edi)
+ movl 16(%ecx), %edx
+ sbbl 16(%esi), %edx
+ movl %edx, 16(%edi)
+ movl 20(%esi), %edx
+ movl 20(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 20(%edi)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end87:
+ .size mcl_fp_subPre6Lbmi2, .Lfunc_end87-mcl_fp_subPre6Lbmi2
+
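+# Right shift of a 6-limb value by one bit, propagating bits between adjacent
+# limbs with shrdl.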
+ .globl mcl_fp_shr1_6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_6Lbmi2,@function
+mcl_fp_shr1_6Lbmi2: # @mcl_fp_shr1_6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl 20(%eax), %ecx
+ movl 16(%eax), %edx
+ movl 12(%eax), %esi
+ movl 8(%eax), %edi
+ movl (%eax), %ebx
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %ebx
+ movl 20(%esp), %ebp
+ movl %ebx, (%ebp)
+ shrdl $1, %edi, %eax
+ movl %eax, 4(%ebp)
+ shrdl $1, %esi, %edi
+ movl %edi, 8(%ebp)
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ebp)
+ shrdl $1, %ecx, %edx
+ movl %edx, 16(%ebp)
+ shrl %ecx
+ movl %ecx, 20(%ebp)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end88:
+ .size mcl_fp_shr1_6Lbmi2, .Lfunc_end88-mcl_fp_shr1_6Lbmi2
+
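+# Modular addition, 6 limbs: the raw sum x + y is stored first, then the
+# modulus is subtracted as a trial; if that trial subtraction does not borrow
+# (taking the addition carry into account), the reduced result overwrites the
+# output.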
+ .globl mcl_fp_add6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add6Lbmi2,@function
+mcl_fp_add6Lbmi2: # @mcl_fp_add6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $12, %esp
+ movl 40(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ebp
+ movl 36(%esp), %ebx
+ addl (%ebx), %edx
+ adcl 4(%ebx), %ebp
+ movl 8(%eax), %ecx
+ adcl 8(%ebx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %edi
+ adcl 12(%eax), %ecx
+ adcl 16(%eax), %edi
+ movl 20(%ebx), %ebx
+ adcl 20(%eax), %ebx
+ movl 32(%esp), %eax
+ movl %edx, (%eax)
+ movl %ebp, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %ecx, 12(%eax)
+ movl %edi, 16(%eax)
+ movl %ebx, 20(%eax)
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 44(%esp), %esi
+ subl (%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 8(%esp), %edx # 4-byte Reload
+ movl 44(%esp), %esi
+ sbbl 4(%esi), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ sbbl 8(%esi), %edx
+ sbbl 12(%esi), %ebp
+ sbbl 16(%esi), %edi
+ sbbl 20(%esi), %ebx
+ sbbl $0, %eax
+ testb $1, %al
+ jne .LBB89_2
+# BB#1: # %nocarry
+ movl (%esp), %eax # 4-byte Reload
+ movl 32(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ecx)
+ movl %edx, 8(%ecx)
+ movl %ebp, 12(%ecx)
+ movl %edi, 16(%ecx)
+ movl %ebx, 20(%ecx)
+.LBB89_2: # %carry
+ addl $12, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end89:
+ .size mcl_fp_add6Lbmi2, .Lfunc_end89-mcl_fp_add6Lbmi2
+
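+# Modular addition, 'NF' variant: computes both x + y and x + y - p, then
+# selects per limb which value to store based on the sign of the trial
+# subtraction, so no explicit carry word is produced.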
+ .globl mcl_fp_addNF6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF6Lbmi2,@function
+mcl_fp_addNF6Lbmi2: # @mcl_fp_addNF6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 68(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 64(%esp), %ebp
+ addl (%ebp), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ adcl 4(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 20(%eax), %edx
+ movl 16(%eax), %esi
+ movl 12(%eax), %edi
+ movl 8(%eax), %eax
+ adcl 8(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 12(%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 16(%ebp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%ebp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ movl 72(%esp), %ebx
+ subl (%ebx), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ movl 72(%esp), %ecx
+ sbbl 4(%ecx), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ sbbl 8(%ecx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 12(%ecx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl %esi, %edi
+ sbbl 16(%ecx), %edi
+ movl %edx, %esi
+ sbbl 20(%ecx), %esi
+ movl %esi, %ebx
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ js .LBB90_2
+# BB#1:
+ movl (%esp), %eax # 4-byte Reload
+.LBB90_2:
+ movl 60(%esp), %ebx
+ movl %eax, (%ebx)
+ movl 20(%esp), %ecx # 4-byte Reload
+ js .LBB90_4
+# BB#3:
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB90_4:
+ movl %ecx, 4(%ebx)
+ movl 36(%esp), %eax # 4-byte Reload
+ movl 28(%esp), %edx # 4-byte Reload
+ movl 24(%esp), %ecx # 4-byte Reload
+ js .LBB90_6
+# BB#5:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB90_6:
+ movl %ecx, 8(%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ js .LBB90_8
+# BB#7:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB90_8:
+ movl %edx, 12(%ebx)
+ js .LBB90_10
+# BB#9:
+ movl %edi, %ecx
+.LBB90_10:
+ movl %ecx, 16(%ebx)
+ js .LBB90_12
+# BB#11:
+ movl %esi, %eax
+.LBB90_12:
+ movl %eax, 20(%ebx)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end90:
+ .size mcl_fp_addNF6Lbmi2, .Lfunc_end90-mcl_fp_addNF6Lbmi2
+
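+# Modular subtraction, 6 limbs: computes and stores x - y; if the subtraction
+# borrowed, the modulus is added back into the stored result.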
+ .globl mcl_fp_sub6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub6Lbmi2,@function
+mcl_fp_sub6Lbmi2: # @mcl_fp_sub6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 40(%esp), %ebx
+ movl (%ebx), %esi
+ movl 4(%ebx), %edi
+ movl 44(%esp), %ecx
+ subl (%ecx), %esi
+ sbbl 4(%ecx), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl 8(%ebx), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%ebx), %eax
+ sbbl 12(%ecx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 16(%ebx), %ebp
+ sbbl 16(%ecx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 20(%ebx), %edx
+ sbbl 20(%ecx), %edx
+ movl $0, %ecx
+ sbbl $0, %ecx
+ testb $1, %cl
+ movl 36(%esp), %ebx
+ movl %esi, (%ebx)
+ movl %edi, 4(%ebx)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl %eax, 12(%ebx)
+ movl %ebp, 16(%ebx)
+ movl %edx, 20(%ebx)
+ je .LBB91_2
+# BB#1: # %carry
+ movl 48(%esp), %ecx
+ addl (%ecx), %esi
+ movl %esi, (%ebx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 4(%ecx), %eax
+ adcl 8(%ecx), %edi
+ movl %eax, 4(%ebx)
+ movl 12(%ecx), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl %eax, 12(%ebx)
+ movl 16(%ecx), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ebx)
+ movl 20(%ecx), %eax
+ adcl %edx, %eax
+ movl %eax, 20(%ebx)
+.LBB91_2: # %nocarry
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end91:
+ .size mcl_fp_sub6Lbmi2, .Lfunc_end91-mcl_fp_sub6Lbmi2
+
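+# Modular subtraction, 'NF' variant: computes x - y, then adds back the
+# modulus masked by the sign of the result (arithmetic-shift mask), avoiding a
+# separate carry branch.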
+ .globl mcl_fp_subNF6Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF6Lbmi2,@function
+mcl_fp_subNF6Lbmi2: # @mcl_fp_subNF6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 48(%esp), %ebx
+ movl 20(%ebx), %esi
+ movl (%ebx), %ecx
+ movl 4(%ebx), %eax
+ movl 52(%esp), %ebp
+ subl (%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 4(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 16(%ebx), %eax
+ movl 12(%ebx), %ecx
+ movl 8(%ebx), %edx
+ sbbl 8(%ebp), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ sbbl 16(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %edx
+ sbbl 20(%ebp), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl %edx, %ebp
+ sarl $31, %ebp
+ movl %ebp, %ecx
+ addl %ecx, %ecx
+ movl %ebp, %eax
+ adcl %eax, %eax
+ shrl $31, %edx
+ orl %ecx, %edx
+ movl 56(%esp), %ebx
+ andl 4(%ebx), %eax
+ andl (%ebx), %edx
+ movl 20(%ebx), %edi
+ andl %ebp, %edi
+ movl 16(%ebx), %esi
+ andl %ebp, %esi
+ movl 12(%ebx), %ecx
+ andl %ebp, %ecx
+ andl 8(%ebx), %ebp
+ addl 8(%esp), %edx # 4-byte Folded Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl 44(%esp), %ebx
+ movl %edx, (%ebx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 4(%ebx)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 8(%ebx)
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, 12(%ebx)
+ movl %esi, 16(%ebx)
+ adcl (%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%ebx)
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end92:
+ .size mcl_fp_subNF6Lbmi2, .Lfunc_end92-mcl_fp_subNF6Lbmi2
+
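+# Double-width addition for 6-limb operands: adds two 12-limb values; the low
+# half is stored as-is and the high half is conditionally reduced by the
+# modulus passed as the last argument.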
+ .globl mcl_fpDbl_add6Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add6Lbmi2,@function
+mcl_fpDbl_add6Lbmi2: # @mcl_fpDbl_add6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 64(%esp), %edx
+ movl 60(%esp), %ecx
+ movl 12(%ecx), %esi
+ movl 16(%ecx), %eax
+ movl 8(%edx), %edi
+ movl (%edx), %ebx
+ addl (%ecx), %ebx
+ movl 56(%esp), %ebp
+ movl %ebx, (%ebp)
+ movl 4(%edx), %ebx
+ adcl 4(%ecx), %ebx
+ adcl 8(%ecx), %edi
+ adcl 12(%edx), %esi
+ adcl 16(%edx), %eax
+ movl %ebx, 4(%ebp)
+ movl %edx, %ebx
+ movl 32(%ebx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %edi, 8(%ebp)
+ movl 20(%ebx), %edi
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ adcl %edi, %esi
+ movl 24(%ebx), %edi
+ movl %eax, 16(%ebp)
+ movl 24(%ecx), %edx
+ adcl %edi, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 28(%ebx), %edi
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %eax
+ adcl %edi, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 32(%ecx), %ebp
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 36(%ebx), %esi
+ movl %ebx, %edi
+ movl 36(%ecx), %ebx
+ adcl %esi, %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 40(%edi), %esi
+ movl 40(%ecx), %edi
+ adcl %esi, %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 64(%esp), %esi
+ movl 44(%esi), %esi
+ movl 44(%ecx), %ecx
+ adcl %esi, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 68(%esp), %esi
+ subl (%esi), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 68(%esp), %edx
+ sbbl 4(%edx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 8(%edx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ sbbl 12(%edx), %ebp
+ movl %edi, %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ sbbl 16(%edx), %ebx
+ movl %edi, %eax
+ sbbl 20(%edx), %eax
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB93_2
+# BB#1:
+ movl %eax, %edi
+.LBB93_2:
+ testb %cl, %cl
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl 16(%esp), %edx # 4-byte Reload
+ jne .LBB93_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB93_4:
+ movl 56(%esp), %eax
+ movl %ecx, 24(%eax)
+ movl %edx, 28(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl 24(%esp), %edx # 4-byte Reload
+ jne .LBB93_6
+# BB#5:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB93_6:
+ movl %edx, 32(%eax)
+ movl 28(%esp), %edx # 4-byte Reload
+ jne .LBB93_8
+# BB#7:
+ movl %ebp, %edx
+.LBB93_8:
+ movl %edx, 36(%eax)
+ jne .LBB93_10
+# BB#9:
+ movl %ebx, %ecx
+.LBB93_10:
+ movl %ecx, 40(%eax)
+ movl %edi, 44(%eax)
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end93:
+ .size mcl_fpDbl_add6Lbmi2, .Lfunc_end93-mcl_fpDbl_add6Lbmi2
+
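+# Double-width subtraction for 6-limb operands: subtracts two 12-limb values;
+# the low half is stored directly and the modulus is added back into the high
+# half when the overall subtraction borrowed.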
+ .globl mcl_fpDbl_sub6Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub6Lbmi2,@function
+mcl_fpDbl_sub6Lbmi2: # @mcl_fpDbl_sub6Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 48(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %edi
+ movl 52(%esp), %esi
+ subl (%esi), %eax
+ sbbl 4(%esi), %edi
+ movl 8(%edx), %ebx
+ sbbl 8(%esi), %ebx
+ movl 44(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edx), %eax
+ sbbl 12(%esi), %eax
+ movl %edi, 4(%ecx)
+ movl 16(%edx), %edi
+ sbbl 16(%esi), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%esi), %ebx
+ movl %eax, 12(%ecx)
+ movl 20(%edx), %eax
+ sbbl %ebx, %eax
+ movl 24(%esi), %ebx
+ movl %edi, 16(%ecx)
+ movl 24(%edx), %edi
+ sbbl %ebx, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 28(%esi), %edi
+ movl %eax, 20(%ecx)
+ movl 28(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 32(%esi), %edi
+ movl 32(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 36(%esi), %edi
+ movl 36(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 40(%esi), %edi
+ movl 40(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 44(%esi), %esi
+ movl 44(%edx), %eax
+ sbbl %esi, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl $0, %ebx
+ sbbl $0, %ebx
+ andl $1, %ebx
+ movl 56(%esp), %eax
+ jne .LBB94_1
+# BB#2:
+ xorl %edx, %edx
+ jmp .LBB94_3
+.LBB94_1:
+ movl 20(%eax), %edx
+.LBB94_3:
+ testb %bl, %bl
+ jne .LBB94_4
+# BB#5:
+ movl $0, %esi
+ movl $0, %edi
+ jmp .LBB94_6
+.LBB94_4:
+ movl (%eax), %edi
+ movl 4(%eax), %esi
+.LBB94_6:
+ jne .LBB94_7
+# BB#8:
+ movl $0, %ebx
+ jmp .LBB94_9
+.LBB94_7:
+ movl 16(%eax), %ebx
+.LBB94_9:
+ jne .LBB94_10
+# BB#11:
+ movl $0, %ebp
+ jmp .LBB94_12
+.LBB94_10:
+ movl 12(%eax), %ebp
+.LBB94_12:
+ jne .LBB94_13
+# BB#14:
+ xorl %eax, %eax
+ jmp .LBB94_15
+.LBB94_13:
+ movl 8(%eax), %eax
+.LBB94_15:
+ addl 8(%esp), %edi # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ movl %edi, 24(%ecx)
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 28(%ecx)
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 36(%ecx)
+ movl %ebx, 40(%ecx)
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%ecx)
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end94:
+ .size mcl_fpDbl_sub6Lbmi2, .Lfunc_end94-mcl_fpDbl_sub6Lbmi2
+
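+# 7-limb by single-word multiplication using mulx: produces an 8-limb result
+# with no reduction.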
+ .globl mcl_fp_mulUnitPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre7Lbmi2,@function
+mcl_fp_mulUnitPre7Lbmi2: # @mcl_fp_mulUnitPre7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 44(%esp), %edx
+ movl 40(%esp), %edi
+ mulxl 4(%edi), %ecx, %esi
+ mulxl (%edi), %ebx, %eax
+ movl %ebx, 12(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ mulxl 8(%edi), %ecx, %eax
+ adcl %esi, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ mulxl 12(%edi), %ebx, %ecx
+ adcl %eax, %ebx
+ mulxl 16(%edi), %esi, %ebp
+ adcl %ecx, %esi
+ mulxl 20(%edi), %ecx, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ adcl %ebp, %ecx
+ mulxl 24(%edi), %edx, %edi
+ movl 36(%esp), %eax
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%eax)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ movl 4(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl %ebx, 12(%eax)
+ movl %esi, 16(%eax)
+ movl %ecx, 20(%eax)
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%eax)
+ adcl $0, %edi
+ movl %edi, 28(%eax)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end95:
+ .size mcl_fp_mulUnitPre7Lbmi2, .Lfunc_end95-mcl_fp_mulUnitPre7Lbmi2
+
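+# Schoolbook 7 x 7 limb multiplication using mulx: accumulates one pass per
+# word of the multiplier and produces the full 14-limb product with no
+# reduction.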
+ .globl mcl_fpDbl_mulPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre7Lbmi2,@function
+mcl_fpDbl_mulPre7Lbmi2: # @mcl_fpDbl_mulPre7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 96(%esp), %eax
+ movl (%eax), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 100(%esp), %eax
+ movl (%eax), %ebp
+ mulxl %ebp, %ecx, %eax
+ movl %esi, %edx
+ mulxl %ebp, %edx, %esi
+ movl %edx, 40(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl 8(%edi), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ mulxl %ebp, %edi, %ecx
+ adcl %eax, %edi
+ movl 12(%ebx), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mulxl %ebp, %ebx, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl %ecx, %ebx
+ movl 16(%eax), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ mulxl %ebp, %ecx, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl 20(%eax), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ mulxl %ebp, %edx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl 24(%eax), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %ebp, %eax, %edx
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%eax)
+ adcl $0, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl 4(%eax), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ addl %esi, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ mulxl %eax, %esi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl 60(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edi, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ movl 56(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebx, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl %ecx, %ebx
+ movl 52(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 40(%esp), %esi # 4-byte Folded Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ adcl 56(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 64(%esp) # 4-byte Folded Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%eax)
+ movl 96(%esp), %ecx
+ movl (%ecx), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl 8(%eax), %eax
+ mulxl %eax, %edx, %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ addl %esi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 4(%ecx), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ mulxl %eax, %edx, %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl %edi, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 8(%ecx), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ mulxl %eax, %esi, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl %ebx, %esi
+ movl %esi, %edi
+ movl 12(%ecx), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %eax, %edx, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 16(%ecx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ mulxl %eax, %ebx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl 20(%ecx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %eax, %ebp, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl 24(%ecx), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ mulxl %eax, %ecx, %edx
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ sbbl %esi, %esi
+ andl $1, %esi
+ movl 20(%esp), %eax # 4-byte Reload
+ addl %eax, 68(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 60(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ adcl %edx, %esi
+ movl 92(%esp), %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%eax)
+ movl 100(%esp), %eax
+ movl 12(%eax), %eax
+ movl 56(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ addl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edi, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl %ebx, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebx, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebp, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl %ecx, %ebp
+ movl 28(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %eax
+ adcl %esi, %ecx
+ movl %ecx, %edx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 32(%esp), %esi # 4-byte Reload
+ addl %esi, 64(%esp) # 4-byte Folded Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl %esi, 68(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl %eax, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 96(%esp), %ecx
+ movl (%ecx), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl 16(%eax), %esi
+ mulxl %esi, %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ addl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 4(%ecx), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ mulxl %esi, %eax, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl %edi, %eax
+ movl %eax, %edi
+ movl 8(%ecx), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ mulxl %esi, %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%ecx), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ mulxl %esi, %eax, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl %ebx, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 16(%ecx), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ mulxl %esi, %ebx, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl 20(%ecx), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %esi, %edx, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %eax
+ movl 24(%ecx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ mulxl %esi, %ebp, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl %edi, %esi
+ addl 28(%esp), %esi # 4-byte Folded Reload
+ movl 12(%esp), %edi # 4-byte Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl %edx, 68(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl 36(%esp), %edx # 4-byte Reload
+ movl %edx, 16(%eax)
+ movl 100(%esp), %eax
+ movl 20(%eax), %eax
+ movl 60(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ addl %esi, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ mulxl %eax, %esi, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl 56(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebx, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %edi
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl %ebp, %edx
+ movl %edx, %ebp
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %eax
+ sbbl %edx, %edx
+ andl $1, %edx
+ addl 36(%esp), %esi # 4-byte Folded Reload
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl 60(%esp), %edx # 4-byte Reload
+ movl %edx, 20(%eax)
+ movl 100(%esp), %eax
+ movl 24(%eax), %edx
+ movl 96(%esp), %eax
+ mulxl (%eax), %ebp, %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ addl %esi, %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ mulxl 4(%eax), %esi, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl %ecx, %esi
+ movl %esi, %ebp
+ mulxl 8(%eax), %ecx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl %ebx, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ mulxl 12(%eax), %ebx, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ mulxl 16(%eax), %edi, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ mulxl 20(%eax), %esi, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ mulxl 24(%eax), %edx, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 68(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl 92(%esp), %eax
+ movl 64(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 68(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl %ebx, 36(%eax)
+ movl %edi, 40(%eax)
+ movl %esi, 44(%eax)
+ movl %edx, 48(%eax)
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end96:
+ .size mcl_fpDbl_mulPre7Lbmi2, .Lfunc_end96-mcl_fpDbl_mulPre7Lbmi2
+
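+# 7-limb squaring using mulx: structured like the 7 x 7 multiply above, with
+# the same operand reused, producing the full 14-limb square with no
+# reduction.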
+ .globl mcl_fpDbl_sqrPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre7Lbmi2,@function
+mcl_fpDbl_sqrPre7Lbmi2: # @mcl_fpDbl_sqrPre7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $80, %esp
+ movl 104(%esp), %ecx
+ movl (%ecx), %ebx
+ movl 4(%ecx), %eax
+ movl %eax, %edx
+ mulxl %ebx, %esi, %edi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl %ebx, %ebp, %edx
+ movl %ebp, 44(%esp) # 4-byte Spill
+ addl %esi, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 8(%ecx), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ mulxl %ebx, %edx, %esi
+ adcl %edi, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 12(%ecx), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ mulxl %ebx, %edi, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ movl 16(%ecx), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ mulxl %ebx, %esi, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl 20(%ecx), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ mulxl %ebx, %edx, %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 24(%ecx), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ mulxl %ebx, %ecx, %ebx
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx
+ movl 44(%esp), %edx # 4-byte Reload
+ movl %edx, (%ecx)
+ adcl $0, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ addl %edx, 72(%esp) # 4-byte Folded Spill
+ movl %eax, %edx
+ mulxl %eax, %ebx, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ebp, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ movl 64(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edi, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ movl 60(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %esi
+ movl 52(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %eax, %ecx, %eax
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %edx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl 76(%esp), %ebx # 4-byte Folded Reload
+ adcl 56(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl %esi, 68(%esp) # 4-byte Folded Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %eax, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%eax)
+ movl 104(%esp), %esi
+ movl (%esi), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 8(%esi), %ecx
+ mulxl %ecx, %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ addl %ebx, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 4(%esi), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %esi, %ebx
+ mulxl %ecx, %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ mulxl %ecx, %eax, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl %edi, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 12(%ebx), %eax
+ movl %eax, %edx
+ mulxl %ecx, %edi, %edx
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl %edi, %edx
+ movl %edx, %esi
+ movl 16(%ebx), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %ecx, %edx, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %edi
+ movl 20(%ebx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ mulxl %ecx, %ebx, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl 24(%ebp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ mulxl %ecx, %ecx, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ movl 24(%esp), %edx # 4-byte Reload
+ addl %edx, 64(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl %edx, 60(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebx # 4-byte Folded Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 52(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ addl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl %eax, %edx
+ mulxl %eax, %edx, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl %ebx, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl %ecx, %edx
+ movl %edx, %esi
+ movl 36(%esp), %edx # 4-byte Reload
+ mulxl %eax, %edi, %eax
+ adcl %ebp, %edi
+ movl %edi, %edx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 52(%esp), %edi # 4-byte Reload
+ addl %edi, 68(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl 28(%esp), %ebx # 4-byte Reload
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl 24(%esp), %ebp # 4-byte Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ adcl %eax, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 104(%esp), %ecx
+ movl (%ecx), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ mulxl %eax, %edx, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ addl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 4(%ecx), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ mulxl %eax, %edx, %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl %edi, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 8(%ecx), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ mulxl %eax, %edx, %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl %ebx, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 12(%ecx), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ mulxl %eax, %ecx, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl %ebp, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl %eax, %edx
+ mulxl %eax, %ecx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 20(%esi), %ecx
+ movl %ecx, %edx
+ mulxl %eax, %edx, %ebp
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl %edx, %edi
+ movl 24(%esi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ mulxl %eax, %esi, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 12(%esp), %eax # 4-byte Reload
+ addl 24(%esp), %eax # 4-byte Folded Reload
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl %edx, 72(%esp) # 4-byte Folded Spill
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl %edx, 64(%esp) # 4-byte Folded Spill
+ movl 8(%esp), %edx # 4-byte Reload
+ adcl %edx, 68(%esp) # 4-byte Folded Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl %ebp, %esi
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %edx, %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ addl %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %ebp, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebp # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %edi, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %edx
+ mulxl %ecx, %edx, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl %esi, %edx
+ movl %edx, %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ mulxl %ecx, %ecx, %edx
+ adcl %ebx, %ecx
+ movl %ecx, %ebx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl 28(%esp), %ebp # 4-byte Folded Reload
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl %esi, 72(%esp) # 4-byte Folded Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ adcl %edx, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%eax)
+ movl 104(%esp), %eax
+ movl 24(%eax), %edx
+ mulxl (%eax), %ecx, %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ addl %ebp, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ mulxl 4(%eax), %ecx, %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ adcl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ mulxl 8(%eax), %ecx, %ebx
+ movl %ebx, 72(%esp) # 4-byte Spill
+ adcl %edi, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ mulxl 12(%eax), %ebx, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl %esi, %ebx
+ mulxl 16(%eax), %edi, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ mulxl 20(%eax), %esi, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ mulxl %edx, %edx, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl %eax, 76(%esp) # 4-byte Folded Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl 100(%esp), %eax
+ movl 68(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 64(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 76(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl %ebx, 36(%eax)
+ movl %edi, 40(%eax)
+ movl %esi, 44(%eax)
+ movl %edx, 48(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ addl $80, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end97:
+ .size mcl_fpDbl_sqrPre7Lbmi2, .Lfunc_end97-mcl_fpDbl_sqrPre7Lbmi2
+
+ .globl mcl_fp_mont7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont7Lbmi2,@function
+mcl_fp_mont7Lbmi2: # @mcl_fp_mont7Lbmi2
+# BB#0:
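+# Annotation (interpretation of this generated code): mcl_fp_mont7Lbmi2 appears to
+# compute the Montgomery product z = x*y*R^-1 mod p for 7 x 32-bit limbs (R = 2^224),
+# using BMI2 mulx. The body below is an unrolled word-serial (CIOS-style) loop: for
+# each multiplier limb y[i] it accumulates x*y[i], forms a reduction factor m from the
+# low accumulator limb, folds in m*p, and drops the now-zero low limb.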
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $116, %esp
+ movl 140(%esp), %eax
+ movl 24(%eax), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx
+ movl (%ecx), %ecx
+ mulxl %ecx, %edx, %esi
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 20(%eax), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ mulxl %ecx, %edi, %edx
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 16(%eax), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ mulxl %ecx, %edx, %ebx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 8(%eax), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ mulxl %ecx, %edi, %edx
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl (%eax), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ mulxl %ecx, %edi, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ mulxl %ecx, %ebp, %edx
+ addl %edi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl %edx, 48(%esp) # 4-byte Folded Spill
+ movl 12(%eax), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ mulxl %ecx, %ecx, %eax
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 148(%esp), %ebx
+ movl -4(%ebx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ imull %eax, %edx
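+# Annotation: -4(p) is loaded above and cached at 76(%esp); in this generated code it
+# appears to hold -p^-1 mod 2^32. The imull forms the Montgomery factor
+# m = t0 * (-p^-1) mod 2^32, so that adding m*p clears the low limb of the accumulator.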
+ movl (%ebx), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 4(%ebx), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ mulxl %ecx, %esi, %ecx
+ mulxl %edi, %edi, %eax
+ movl %edi, 8(%esp) # 4-byte Spill
+ addl %esi, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 8(%ebx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ mulxl %esi, %eax, %esi
+ adcl %ecx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%ebx), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ mulxl %ecx, %eax, %ecx
+ adcl %esi, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 16(%ebx), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ mulxl %esi, %eax, %esi
+ adcl %ecx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ mulxl %eax, %eax, %edi
+ adcl %esi, %eax
+ movl %eax, %ecx
+ movl 24(%ebx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ mulxl %eax, %edx, %eax
+ adcl %edi, %edx
+ adcl $0, %eax
+ addl %ebp, 8(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl %esi, 40(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl %esi, 32(%esp) # 4-byte Folded Spill
+ movl 12(%esp), %esi # 4-byte Reload
+ adcl %esi, 28(%esp) # 4-byte Folded Spill
+ movl 16(%esp), %esi # 4-byte Reload
+ adcl %esi, 24(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 144(%esp), %edx
+ movl 4(%edx), %edx
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ mulxl 84(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ mulxl 56(%esp), %ebx, %esi # 4-byte Folded Reload
+ mulxl 60(%esp), %eax, %edi # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ addl %ebx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ mulxl 68(%esp), %edi, %eax # 4-byte Folded Reload
+ adcl %esi, %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ mulxl 64(%esp), %ebp, %ebx # 4-byte Folded Reload
+ adcl %eax, %ebp
+ mulxl 80(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl %ecx, %eax
+ movl %eax, %ecx
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ movl 36(%esp), %eax # 4-byte Reload
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, %ebx
+ sbbl %ecx, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl %eax, %edx
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, %edi
+ mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ecx, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload
+ adcl %eax, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ mulxl 88(%esp), %ebp, %ecx # 4-byte Folded Reload
+ adcl %eax, %ebp
+ adcl $0, %ecx
+ movl %ecx, %edx
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ movl 8(%esp), %ecx # 4-byte Reload
+ addl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 32(%esp) # 4-byte Folded Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 28(%esp) # 4-byte Folded Spill
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 24(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 144(%esp), %eax
+ movl 8(%eax), %edx
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ addl %ecx, %edi
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ mulxl 80(%esp), %edx, %ecx # 4-byte Folded Reload
+ adcl %eax, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl %edi, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl %ebp, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ecx, %esi
+ movl %esi, %edi
+ mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload
+ adcl %eax, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ mulxl 88(%esp), %ebp, %ecx # 4-byte Folded Reload
+ adcl %eax, %ebp
+ adcl $0, %ecx
+ movl %ecx, %edx
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ movl 8(%esp), %ecx # 4-byte Reload
+ addl 44(%esp), %ecx # 4-byte Folded Reload
+ movl 16(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 32(%esp) # 4-byte Folded Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 28(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl %ebx, 20(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 144(%esp), %eax
+ movl 12(%eax), %edx
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ addl %ecx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ mulxl 80(%esp), %edx, %ecx # 4-byte Folded Reload
+ adcl %eax, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 16(%esp), %edi # 4-byte Reload
+ adcl %edi, 24(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl (%esp), %ebx # 4-byte Reload
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ adcl %ebp, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ecx, %esi
+ movl %esi, %edi
+ mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload
+ adcl %eax, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ mulxl 88(%esp), %ebp, %ecx # 4-byte Folded Reload
+ adcl %eax, %ebp
+ adcl $0, %ecx
+ movl %ecx, %edx
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ movl 8(%esp), %ecx # 4-byte Reload
+ addl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 32(%esp) # 4-byte Folded Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 28(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl %ebx, 20(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 144(%esp), %eax
+ movl 16(%eax), %edx
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ mulxl 68(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ addl %ecx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 64(%esp), %ebx, %eax # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ mulxl 80(%esp), %edx, %ecx # 4-byte Folded Reload
+ adcl %eax, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 16(%esp), %edi # 4-byte Reload
+ adcl %edi, 24(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl (%esp), %ebx # 4-byte Reload
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ adcl %ebp, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ecx, %esi
+ movl %esi, %edi
+ mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload
+ adcl %eax, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ mulxl 92(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ mulxl 88(%esp), %edx, %ecx # 4-byte Folded Reload
+ adcl %eax, %edx
+ movl %edx, %ebp
+ adcl $0, %ecx
+ movl %ecx, %edx
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ movl 8(%esp), %ecx # 4-byte Reload
+ addl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 32(%esp) # 4-byte Folded Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 28(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl %ebx, 20(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 144(%esp), %eax
+ movl 20(%eax), %edx
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 68(%esp), %eax, %ebp # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 56(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 60(%esp), %ebx, %edi # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ addl %ecx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %ebp, %ecx
+ movl %ecx, %ebp
+ mulxl 80(%esp), %ebx, %ecx # 4-byte Folded Reload
+ adcl %eax, %ebx
+ adcl %esi, %ecx
+ movl %ecx, %esi
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 16(%esp), %edi # 4-byte Reload
+ adcl %edi, 24(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebp
+ sbbl %eax, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 112(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 108(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ mulxl 104(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl %eax, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ecx, %esi
+ movl %esi, %edi
+ mulxl 96(%esp), %ecx, %esi # 4-byte Folded Reload
+ adcl %eax, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ mulxl 92(%esp), %eax, %ebx # 4-byte Folded Reload
+ adcl %esi, %eax
+ movl %eax, %esi
+ mulxl 88(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ movl %ecx, %ebx
+ adcl $0, %eax
+ movl %eax, %ecx
+ movl 44(%esp), %edx # 4-byte Reload
+ andl $1, %edx
+ movl 8(%esp), %eax # 4-byte Reload
+ addl 36(%esp), %eax # 4-byte Folded Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ adcl %ebp, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 144(%esp), %edx
+ movl 24(%edx), %edx
+ mulxl 56(%esp), %ebx, %esi # 4-byte Folded Reload
+ mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ mulxl 72(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ mulxl 68(%esp), %ebp, %edi # 4-byte Folded Reload
+ adcl %esi, %ebp
+ mulxl 64(%esp), %eax, %esi # 4-byte Folded Reload
+ adcl %edi, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ mulxl 84(%esp), %edi, %eax # 4-byte Folded Reload
+ movl %edi, 84(%esp) # 4-byte Spill
+ mulxl 80(%esp), %ebx, %edx # 4-byte Folded Reload
+ adcl %esi, %ebx
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %esi
+ adcl %ecx, %eax
+ movl %eax, %ecx
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ movl 60(%esp), %edi # 4-byte Reload
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl %eax, 68(%esp) # 4-byte Folded Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ imull %edi, %edx
+ mulxl 108(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 76(%esp) # 4-byte Spill
+ mulxl 112(%esp), %ecx, %esi # 4-byte Folded Reload
+ addl %eax, %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ mulxl 104(%esp), %eax, %edi # 4-byte Folded Reload
+ adcl %esi, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ mulxl 100(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %edi, %ecx
+ movl %edx, %edi
+ mulxl 96(%esp), %ebx, %ebp # 4-byte Folded Reload
+ adcl %eax, %ebx
+ mulxl 92(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ebp, %esi
+ movl %edi, %edx
+ mulxl 88(%esp), %edi, %ebp # 4-byte Folded Reload
+ adcl %eax, %edi
+ adcl $0, %ebp
+ andl $1, 64(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ addl 60(%esp), %eax # 4-byte Folded Reload
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 68(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ adcl 72(%esp), %ebp # 4-byte Folded Reload
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
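+# Annotation: final conditional subtraction. The subl/sbbl chain below computes t - p
+# limb by limb; sbbl $0 / andl $1 keep the borrow, and the jne branches select t when
+# the subtraction borrowed (t < p) and t - p otherwise, so the stored result is t mod p.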
+ subl 108(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ sbbl 112(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ sbbl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ sbbl 100(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 100(%esp) # 4-byte Spill
+ movl %esi, %ebx
+ sbbl 96(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 104(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ sbbl 92(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 108(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ sbbl 88(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 112(%esp) # 4-byte Spill
+ sbbl $0, %eax
+ andl $1, %eax
+ movl %eax, %ecx
+ jne .LBB98_2
+# BB#1:
+ movl 60(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+.LBB98_2:
+ movl 136(%esp), %ebx
+ movl 80(%esp), %edx # 4-byte Reload
+ movl %edx, (%ebx)
+ movl %ebx, %edx
+ testb %cl, %cl
+ movl 84(%esp), %ebx # 4-byte Reload
+ jne .LBB98_4
+# BB#3:
+ movl 64(%esp), %ebx # 4-byte Reload
+.LBB98_4:
+ movl %ebx, 4(%edx)
+ movl 76(%esp), %ecx # 4-byte Reload
+ jne .LBB98_6
+# BB#5:
+ movl 72(%esp), %ecx # 4-byte Reload
+.LBB98_6:
+ movl %ecx, 8(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ jne .LBB98_8
+# BB#7:
+ movl 100(%esp), %eax # 4-byte Reload
+.LBB98_8:
+ movl %eax, 12(%edx)
+ jne .LBB98_10
+# BB#9:
+ movl 104(%esp), %esi # 4-byte Reload
+.LBB98_10:
+ movl %esi, 16(%edx)
+ jne .LBB98_12
+# BB#11:
+ movl 108(%esp), %edi # 4-byte Reload
+.LBB98_12:
+ movl %edi, 20(%edx)
+ jne .LBB98_14
+# BB#13:
+ movl 112(%esp), %ebp # 4-byte Reload
+.LBB98_14:
+ movl %ebp, 24(%edx)
+ addl $116, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end98:
+ .size mcl_fp_mont7Lbmi2, .Lfunc_end98-mcl_fp_mont7Lbmi2
+
+ .globl mcl_fp_montNF7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF7Lbmi2,@function
+mcl_fp_montNF7Lbmi2: # @mcl_fp_montNF7Lbmi2
+# BB#0:
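+# Annotation (interpretation): mcl_fp_montNF7Lbmi2 appears to be the "NF" Montgomery
+# multiplication variant used when the modulus leaves headroom in the top limb, so the
+# per-iteration carries cannot overflow; the final correction is a sign-based select
+# (see the sarl/js sequence at the end) rather than a borrow-driven conditional subtraction.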
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $104, %esp
+ movl 128(%esp), %eax
+ movl (%eax), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx
+ movl (%ecx), %ebp
+ mulxl %ebp, %ecx, %esi
+ movl %edi, %edx
+ mulxl %ebp, %edi, %edx
+ movl %edi, 96(%esp) # 4-byte Spill
+ addl %ecx, %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 8(%eax), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ mulxl %ebp, %ecx, %edi
+ adcl %esi, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 12(%eax), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ mulxl %ebp, %ecx, %ebx
+ adcl %edi, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 16(%eax), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ mulxl %ebp, %edx, %ecx
+ adcl %ebx, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 20(%eax), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ mulxl %ebp, %edx, %esi
+ adcl %ecx, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 24(%eax), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ mulxl %ebp, %ebp, %eax
+ adcl %esi, %ebp
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 136(%esp), %edi
+ movl -4(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ movl %esi, %edx
+ imull %eax, %edx
+ movl (%edi), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ mulxl %ecx, %ecx, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ addl %esi, %ecx
+ movl 4(%edi), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ mulxl %ecx, %eax, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 8(%edi), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ mulxl %ecx, %eax, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%edi), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ mulxl %ecx, %esi, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 84(%esp), %esi # 4-byte Folded Reload
+ movl 16(%edi), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ mulxl %eax, %eax, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ movl 20(%edi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ mulxl %eax, %eax, %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebx
+ movl 24(%edi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ mulxl %eax, %edx, %eax
+ adcl %ebp, %edx
+ movl %edx, %edi
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ movl 28(%esp), %ebp # 4-byte Reload
+ addl %ebp, 36(%esp) # 4-byte Folded Spill
+ movl 24(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 4(%eax), %edx
+ mulxl 64(%esp), %ecx, %esi # 4-byte Folded Reload
+ mulxl 68(%esp), %edi, %eax # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ mulxl 60(%esp), %eax, %edi # 4-byte Folded Reload
+ adcl %esi, %eax
+ movl %eax, %ecx
+ mulxl 56(%esp), %esi, %ebx # 4-byte Folded Reload
+ adcl %edi, %esi
+ mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload
+ adcl %ebx, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ mulxl 48(%esp), %eax, %edi # 4-byte Folded Reload
+ adcl %ebp, %eax
+ movl %eax, %ebx
+ mulxl 44(%esp), %ebp, %eax # 4-byte Folded Reload
+ adcl %edi, %ebp
+ adcl $0, %eax
+ movl %eax, %edx
+ movl 8(%esp), %eax # 4-byte Reload
+ addl 36(%esp), %eax # 4-byte Folded Reload
+ movl 4(%esp), %edi # 4-byte Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 16(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 28(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl %eax, %ebx
+ imull 72(%esp), %edx # 4-byte Folded Reload
+ mulxl 100(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl %edi, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl 92(%esp), %edi, %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl %esi, %eax
+ movl %eax, %esi
+ mulxl 84(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ mulxl 80(%esp), %eax, %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ mulxl 76(%esp), %eax, %edx # 4-byte Folded Reload
+ adcl %ebp, %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ movl 36(%esp), %ebp # 4-byte Reload
+ addl %ebp, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 28(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl %edx, %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 8(%eax), %edx
+ mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 68(%esp), %esi, %edi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ addl %ecx, %edi
+ mulxl 60(%esp), %ecx, %ebx # 4-byte Folded Reload
+ adcl %eax, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ mulxl 56(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ebx, %esi
+ mulxl 52(%esp), %ebx, %ecx # 4-byte Folded Reload
+ adcl %eax, %ebx
+ mulxl 48(%esp), %eax, %ebp # 4-byte Folded Reload
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl %ecx, %eax
+ movl %eax, %ebp
+ mulxl 44(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ movl %ebx, %edx
+ imull 72(%esp), %edx # 4-byte Folded Reload
+ mulxl 100(%esp), %eax, %ebp # 4-byte Folded Reload
+ movl %ebp, 28(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ mulxl 96(%esp), %ebp, %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ adcl %edi, %ebp
+ mulxl 92(%esp), %ebx, %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl %ecx, %ebx
+ mulxl 88(%esp), %edi, %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ mulxl 84(%esp), %esi, %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ mulxl 80(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ mulxl 76(%esp), %edx, %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 36(%esp) # 4-byte Spill
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 12(%eax), %edx
+ mulxl 64(%esp), %esi, %eax # 4-byte Folded Reload
+ mulxl 68(%esp), %edi, %ecx # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ addl %esi, %ecx
+ mulxl 60(%esp), %esi, %edi # 4-byte Folded Reload
+ adcl %eax, %esi
+ mulxl 56(%esp), %eax, %ebx # 4-byte Folded Reload
+ adcl %edi, %eax
+ movl %eax, %edi
+ mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload
+ adcl %ebx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 48(%esp), %eax, %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ movl %eax, %ebx
+ mulxl 44(%esp), %ebp, %eax # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ addl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl %edx, 24(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, %edx
+ imull 72(%esp), %edx # 4-byte Folded Reload
+ mulxl 100(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ addl %edi, %eax
+ mulxl 96(%esp), %edi, %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ mulxl 92(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl %esi, %eax
+ movl %eax, %esi
+ mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ mulxl 84(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ mulxl 80(%esp), %eax, %ebx # 4-byte Folded Reload
+ movl %ebx, (%esp) # 4-byte Spill
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 76(%esp), %eax, %edx # 4-byte Folded Reload
+ adcl %ebp, %eax
+ movl 36(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %esi # 4-byte Reload
+ adcl %esi, 28(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 24(%esp) # 4-byte Folded Spill
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl %edx, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 16(%eax), %edx
+ mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 68(%esp), %esi, %edi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ addl %ecx, %edi
+ mulxl 60(%esp), %ecx, %ebx # 4-byte Folded Reload
+ adcl %eax, %ecx
+ mulxl 56(%esp), %eax, %esi # 4-byte Folded Reload
+ adcl %ebx, %eax
+ movl %eax, %ebx
+ mulxl 52(%esp), %eax, %ebp # 4-byte Folded Reload
+ adcl %esi, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ mulxl 48(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ movl %eax, %esi
+ mulxl 44(%esp), %ebp, %eax # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %eax
+ movl %eax, %edx
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl %ebx, %edx
+ imull 72(%esp), %edx # 4-byte Folded Reload
+ mulxl 100(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ mulxl 96(%esp), %ebx, %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl %edi, %ebx
+ mulxl 92(%esp), %edi, %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ mulxl 84(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ mulxl 80(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ mulxl 76(%esp), %eax, %edx # 4-byte Folded Reload
+ adcl %ebp, %eax
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 28(%esp) # 4-byte Folded Spill
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 24(%esp) # 4-byte Folded Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl %edx, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 20(%eax), %edx
+ mulxl 64(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 68(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ mulxl 60(%esp), %ebp, %ecx # 4-byte Folded Reload
+ adcl %eax, %ebp
+ mulxl 56(%esp), %eax, %edi # 4-byte Folded Reload
+ adcl %ecx, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ mulxl 52(%esp), %ecx, %ebx # 4-byte Folded Reload
+ adcl %edi, %ecx
+ mulxl 48(%esp), %eax, %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ adcl %ebx, %eax
+ movl %eax, %edi
+ mulxl 44(%esp), %ebx, %eax # 4-byte Folded Reload
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %eax
+ movl %eax, %edx
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, (%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edx
+ imull 72(%esp), %edx # 4-byte Folded Reload
+ mulxl 100(%esp), %eax, %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ mulxl 92(%esp), %esi, %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl %ebp, %esi
+ mulxl 88(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ mulxl 84(%esp), %eax, %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ mulxl 80(%esp), %ebp, %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ mulxl 76(%esp), %edi, %edx # 4-byte Folded Reload
+ adcl %ebx, %edi
+ movl %edi, %ebx
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl 28(%esp), %ebp # 4-byte Reload
+ addl %ebp, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ adcl %edx, %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 24(%eax), %edx
+ mulxl 64(%esp), %edi, %ebx # 4-byte Folded Reload
+ mulxl 68(%esp), %eax, %ebp # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ addl %edi, %ebp
+ mulxl 60(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ mulxl 56(%esp), %ebx, %ecx # 4-byte Folded Reload
+ adcl %eax, %ebx
+ mulxl 52(%esp), %esi, %edi # 4-byte Folded Reload
+ adcl %ecx, %esi
+ mulxl 48(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl %edi, %eax
+ movl %eax, %ecx
+ mulxl 44(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ movl 64(%esp), %edi # 4-byte Reload
+ addl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ movl 64(%esp), %eax # 4-byte Reload
+ imull %eax, %edx
+ mulxl 100(%esp), %esi, %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ addl %eax, %esi
+ mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ mulxl 92(%esp), %eax, %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl %edi, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ mulxl 88(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl %ebx, %ebp
+ movl %ecx, %edx
+ mulxl 84(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, %edx
+ mulxl 80(%esp), %edi, %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ecx, %edx
+ mulxl 76(%esp), %ebx, %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl 36(%esp), %edx # 4-byte Reload
+ addl 56(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
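+# Annotation: final correction for the NF variant. The chain below subtracts p from the
+# accumulated result, then the sign of the top word (sarl/js) chooses between t and
+# t - p when writing each output limb.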
+ movl %edx, %ecx
+ subl 100(%esp), %ecx # 4-byte Folded Reload
+ sbbl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ sbbl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ sbbl 88(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 96(%esp) # 4-byte Spill
+ sbbl 84(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 100(%esp) # 4-byte Spill
+ sbbl 80(%esp), %ebx # 4-byte Folded Reload
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 76(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ sarl $31, %eax
+ testl %eax, %eax
+ js .LBB99_2
+# BB#1:
+ movl %ecx, %edx
+.LBB99_2:
+ movl 124(%esp), %esi
+ movl %edx, (%esi)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB99_4
+# BB#3:
+ movl 52(%esp), %eax # 4-byte Reload
+.LBB99_4:
+ movl %eax, 4(%esi)
+ movl 68(%esp), %eax # 4-byte Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ js .LBB99_6
+# BB#5:
+ movl 92(%esp), %ebp # 4-byte Reload
+.LBB99_6:
+ movl %ebp, 8(%esi)
+ movl %esi, %ebp
+ movl 56(%esp), %esi # 4-byte Reload
+ js .LBB99_8
+# BB#7:
+ movl 96(%esp), %esi # 4-byte Reload
+.LBB99_8:
+ movl %esi, 12(%ebp)
+ js .LBB99_10
+# BB#9:
+ movl 100(%esp), %edx # 4-byte Reload
+.LBB99_10:
+ movl %edx, 16(%ebp)
+ js .LBB99_12
+# BB#11:
+ movl %ebx, %ecx
+.LBB99_12:
+ movl %ecx, 20(%ebp)
+ js .LBB99_14
+# BB#13:
+ movl %edi, %eax
+.LBB99_14:
+ movl %eax, 24(%ebp)
+ addl $104, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end99:
+ .size mcl_fp_montNF7Lbmi2, .Lfunc_end99-mcl_fp_montNF7Lbmi2
+
+ .globl mcl_fp_montRed7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed7Lbmi2,@function
+mcl_fp_montRed7Lbmi2: # @mcl_fp_montRed7Lbmi2
+# BB#0:
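+# Annotation (interpretation): mcl_fp_montRed7Lbmi2 appears to reduce a 14-limb
+# (double-width) value, e.g. a product from mulPre/sqrPre, back to 7 limbs: seven
+# rounds each fold in m*p with m = t0 * (-p^-1 mod 2^32) taken from -4(p), followed
+# by one conditional subtraction of p.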
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+ movl 136(%esp), %edi
+ movl -4(%edi), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl (%edi), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl (%eax), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ imull %ecx, %edx
+ movl 24(%edi), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ mulxl %ecx, %ebx, %ecx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 20(%edi), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ mulxl %ecx, %ebx, %ecx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 16(%edi), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ mulxl %ecx, %ebx, %ecx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 4(%edi), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ mulxl %ecx, %ecx, %ebp
+ mulxl %esi, %ebx, %esi
+ movl %ebx, 64(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 8(%edi), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ mulxl %ecx, %esi, %ecx
+ adcl %ebp, %esi
+ movl %esi, %ebp
+ movl 12(%edi), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ mulxl %esi, %esi, %edx
+ adcl %ecx, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %edi
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl 64(%esp), %ebx # 4-byte Reload
+ addl 72(%esp), %ebx # 4-byte Folded Reload
+ movl 28(%esp), %ebx # 4-byte Reload
+ adcl 4(%eax), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ adcl 8(%eax), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 12(%eax), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 16(%eax), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 20(%eax), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 24(%eax), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 28(%eax), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%eax), %ecx
+ movl 48(%eax), %edx
+ movl 44(%eax), %esi
+ movl 40(%eax), %edi
+ movl 36(%eax), %ebp
+ movl 32(%eax), %eax
+ adcl $0, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 100(%esp), %eax, %ebx # 4-byte Folded Reload
+ movl %eax, 4(%esp) # 4-byte Spill
+ mulxl 88(%esp), %ebp, %eax # 4-byte Folded Reload
+ movl %eax, (%esp) # 4-byte Spill
+ mulxl 96(%esp), %ecx, %eax # 4-byte Folded Reload
+ mulxl 92(%esp), %edi, %esi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl %ebp, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 84(%esp), %esi, %ebp # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ mulxl 104(%esp), %ecx, %edi # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ mulxl 80(%esp), %ebx, %edx # 4-byte Folded Reload
+ adcl %edi, %ebx
+ adcl $0, %edx
+ movl 8(%esp), %eax # 4-byte Reload
+ addl 28(%esp), %eax # 4-byte Folded Reload
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 36(%esp) # 4-byte Spill
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ adcl $0, 60(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ adcl $0, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %edi, %edx
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ mulxl 84(%esp), %ebx, %ebp # 4-byte Folded Reload
+ mulxl 96(%esp), %eax, %esi # 4-byte Folded Reload
+ mulxl 92(%esp), %ecx, %edi # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ addl %eax, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ mulxl 88(%esp), %edi, %eax # 4-byte Folded Reload
+ adcl %esi, %edi
+ adcl %ebx, %eax
+ movl %eax, %ebx
+ mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ebp, %esi
+ mulxl 104(%esp), %edx, %ecx # 4-byte Folded Reload
+ adcl %eax, %edx
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 12(%esp), %ebp # 4-byte Reload
+ addl 32(%esp), %ebp # 4-byte Folded Reload
+ movl 8(%esp), %ebp # 4-byte Reload
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ adcl $0, 60(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ adcl $0, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ebp, %edx
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 84(%esp), %edi, %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ mulxl 96(%esp), %eax, %ebx # 4-byte Folded Reload
+ mulxl 92(%esp), %esi, %ecx # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ addl %eax, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ mulxl 88(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ebx, %esi
+ movl %esi, %ebx
+ adcl %edi, %eax
+ movl %eax, %edi
+ mulxl 100(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ mulxl 104(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl %ecx, %edx
+ movl %edx, %ecx
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %ebp, 20(%esp) # 4-byte Folded Spill
+ movl 16(%esp), %ebp # 4-byte Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, 60(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ adcl $0, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ebp, %edx
+ movl %ebp, %edi
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ mulxl 84(%esp), %eax, %ebx # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ mulxl 96(%esp), %eax, %ecx # 4-byte Folded Reload
+ mulxl 92(%esp), %esi, %ebp # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ addl %eax, %ebp
+ mulxl 88(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ecx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ecx
+ mulxl 100(%esp), %esi, %eax # 4-byte Folded Reload
+ adcl %ebx, %esi
+ mulxl 104(%esp), %ebx, %edx # 4-byte Folded Reload
+ adcl %eax, %ebx
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edi, 20(%esp) # 4-byte Folded Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl %edi, 44(%esp) # 4-byte Folded Spill
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ adcl $0, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ebp, %edx
+ imull 76(%esp), %edx # 4-byte Folded Reload
+ mulxl 80(%esp), %ecx, %eax # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl %eax, 60(%esp) # 4-byte Spill
+ mulxl 84(%esp), %ebx, %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ mulxl 96(%esp), %ecx, %edi # 4-byte Folded Reload
+ mulxl 92(%esp), %esi, %eax # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ mulxl 88(%esp), %ecx, %eax # 4-byte Folded Reload
+ adcl %edi, %ecx
+ movl %ecx, %edi
+ adcl %ebx, %eax
+ movl %eax, %ebx
+ mulxl 100(%esp), %esi, %ecx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ mulxl 104(%esp), %edx, %eax # 4-byte Folded Reload
+ adcl %ecx, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl %ebp, 20(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl %esi, 48(%esp) # 4-byte Folded Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl $0, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ imull %eax, %edx
+ mulxl 92(%esp), %eax, %ecx # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ mulxl 96(%esp), %eax, %esi # 4-byte Folded Reload
+ addl %ecx, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ mulxl 88(%esp), %eax, %edi # 4-byte Folded Reload
+ adcl %esi, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, %esi
+ mulxl 84(%esp), %ebp, %eax # 4-byte Folded Reload
+ adcl %edi, %ebp
+ mulxl 100(%esp), %ecx, %edi # 4-byte Folded Reload
+ adcl %eax, %ecx
+ mulxl 104(%esp), %ebx, %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl %edi, %ebx
+ mulxl 80(%esp), %edi, %eax # 4-byte Folded Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %eax
+ movl 64(%esp), %edx # 4-byte Reload
+ addl 52(%esp), %edx # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %edx, %ebp
+ subl 92(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ sbbl 96(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ sbbl 88(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ sbbl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl %ebx, %ecx
+ sbbl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl %edi, %ecx
+ sbbl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl %eax, %ecx
+ sbbl 80(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB100_2
+# BB#1:
+ movl 68(%esp), %ebp # 4-byte Reload
+.LBB100_2:
+ movl 128(%esp), %edx
+ movl %ebp, (%edx)
+ movl %esi, %eax
+ testb %al, %al
+ movl 76(%esp), %ebp # 4-byte Reload
+ jne .LBB100_4
+# BB#3:
+ movl 72(%esp), %ebp # 4-byte Reload
+.LBB100_4:
+ movl %ebp, 4(%edx)
+ movl %ecx, %eax
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB100_6
+# BB#5:
+ movl 88(%esp), %ecx # 4-byte Reload
+.LBB100_6:
+ movl %ecx, 8(%edx)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB100_8
+# BB#7:
+ movl 92(%esp), %ecx # 4-byte Reload
+.LBB100_8:
+ movl %ecx, 12(%edx)
+ jne .LBB100_10
+# BB#9:
+ movl 96(%esp), %ebx # 4-byte Reload
+.LBB100_10:
+ movl %ebx, 16(%edx)
+ jne .LBB100_12
+# BB#11:
+ movl 104(%esp), %edi # 4-byte Reload
+.LBB100_12:
+ movl %edi, 20(%edx)
+ jne .LBB100_14
+# BB#13:
+ movl 100(%esp), %eax # 4-byte Reload
+.LBB100_14:
+ movl %eax, 24(%edx)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end100:
+ .size mcl_fp_montRed7Lbmi2, .Lfunc_end100-mcl_fp_montRed7Lbmi2
+
+ .globl mcl_fp_addPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre7Lbmi2,@function
+mcl_fp_addPre7Lbmi2: # @mcl_fp_addPre7Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 20(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %edi
+ adcl 8(%esi), %edi
+ movl 16(%esp), %ebx
+ movl %ecx, (%ebx)
+ movl 12(%esi), %ecx
+ movl %edx, 4(%ebx)
+ movl 16(%esi), %edx
+ adcl 12(%eax), %ecx
+ adcl 16(%eax), %edx
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %ecx, 12(%ebx)
+ movl 20(%esi), %ecx
+ adcl %edi, %ecx
+ movl %edx, 16(%ebx)
+ movl %ecx, 20(%ebx)
+ movl 24(%eax), %eax
+ movl 24(%esi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 24(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end101:
+ .size mcl_fp_addPre7Lbmi2, .Lfunc_end101-mcl_fp_addPre7Lbmi2
+
+ .globl mcl_fp_subPre7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre7Lbmi2,@function
+mcl_fp_subPre7Lbmi2: # @mcl_fp_subPre7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ xorl %eax, %eax
+ movl 28(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edi), %ebx
+ movl 20(%esp), %ebp
+ movl %edx, (%ebp)
+ movl 12(%ecx), %edx
+ sbbl 12(%edi), %edx
+ movl %esi, 4(%ebp)
+ movl 16(%ecx), %esi
+ sbbl 16(%edi), %esi
+ movl %ebx, 8(%ebp)
+ movl 20(%edi), %ebx
+ movl %edx, 12(%ebp)
+ movl 20(%ecx), %edx
+ sbbl %ebx, %edx
+ movl %esi, 16(%ebp)
+ movl %edx, 20(%ebp)
+ movl 24(%edi), %edx
+ movl 24(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 24(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end102:
+ .size mcl_fp_subPre7Lbmi2, .Lfunc_end102-mcl_fp_subPre7Lbmi2
+
+ .globl mcl_fp_shr1_7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_7Lbmi2,@function
+mcl_fp_shr1_7Lbmi2: # @mcl_fp_shr1_7Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 20(%esi)
+ shrl %eax
+ movl %eax, 24(%esi)
+ popl %esi
+ retl
+.Lfunc_end103:
+ .size mcl_fp_shr1_7Lbmi2, .Lfunc_end103-mcl_fp_shr1_7Lbmi2
+
+ .globl mcl_fp_add7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add7Lbmi2,@function
+mcl_fp_add7Lbmi2: # @mcl_fp_add7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %ebp
+ movl (%ebp), %eax
+ movl 4(%ebp), %edi
+ movl 44(%esp), %ecx
+ addl (%ecx), %eax
+ adcl 4(%ecx), %edi
+ movl 8(%ebp), %esi
+ adcl 8(%ecx), %esi
+ movl 12(%ecx), %edx
+ movl 16(%ecx), %ebx
+ adcl 12(%ebp), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 16(%ebp), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ movl 20(%ecx), %ebp
+ adcl 20(%ebx), %ebp
+ movl 24(%ecx), %edx
+ adcl 24(%ebx), %edx
+ movl 40(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, 4(%ecx)
+ movl %esi, 8(%ecx)
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%ecx)
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%ecx)
+ movl %ebp, 20(%ecx)
+ movl %edx, 24(%ecx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 52(%esp), %ecx
+ subl (%ecx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl 52(%esp), %eax
+ sbbl 4(%eax), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %eax, %edi
+ sbbl 8(%edi), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 12(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ecx
+ movl %ecx, %esi
+ sbbl 20(%edi), %ebp
+ sbbl 24(%edi), %edx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB104_2
+# BB#1: # %nocarry
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl 40(%esp), %eax
+ movl %eax, %ebx
+ movl %ecx, (%ebx)
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%ebx)
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebx)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebx)
+ movl %esi, 16(%ebx)
+ movl %ebp, 20(%ebx)
+ movl %edx, 24(%ebx)
+.LBB104_2: # %carry
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end104:
+ .size mcl_fp_add7Lbmi2, .Lfunc_end104-mcl_fp_add7Lbmi2
+
+ .globl mcl_fp_addNF7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF7Lbmi2,@function
+mcl_fp_addNF7Lbmi2: # @mcl_fp_addNF7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl 80(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 76(%esp), %esi
+ addl (%esi), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 4(%esi), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 20(%eax), %ebx
+ movl 16(%eax), %edi
+ movl 12(%eax), %ebp
+ movl 8(%eax), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 12(%esi), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 24(%esi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl 44(%esp), %esi # 4-byte Reload
+ subl (%eax), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ sbbl 4(%eax), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 8(%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%eax), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ sbbl 16(%eax), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ sbbl 20(%eax), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ sbbl 24(%eax), %edi
+ movl %edi, %ecx
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ js .LBB105_2
+# BB#1:
+ movl (%esp), %esi # 4-byte Reload
+.LBB105_2:
+ movl 72(%esp), %ecx
+ movl %esi, (%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ js .LBB105_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB105_4:
+ movl %eax, 4(%ecx)
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ movl 32(%esp), %esi # 4-byte Reload
+ movl 24(%esp), %ebx # 4-byte Reload
+ js .LBB105_6
+# BB#5:
+ movl 8(%esp), %ebx # 4-byte Reload
+.LBB105_6:
+ movl 72(%esp), %eax
+ movl %ebx, 8(%eax)
+ movl %eax, %ebx
+ js .LBB105_8
+# BB#7:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB105_8:
+ movl %esi, 12(%ebx)
+ js .LBB105_10
+# BB#9:
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB105_10:
+ movl %edx, 16(%ebx)
+ js .LBB105_12
+# BB#11:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB105_12:
+ movl %ecx, 20(%ebx)
+ js .LBB105_14
+# BB#13:
+ movl %edi, %ebp
+.LBB105_14:
+ movl %ebp, 24(%ebx)
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end105:
+ .size mcl_fp_addNF7Lbmi2, .Lfunc_end105-mcl_fp_addNF7Lbmi2
+
+ .globl mcl_fp_sub7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub7Lbmi2,@function
+mcl_fp_sub7Lbmi2: # @mcl_fp_sub7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 48(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ xorl %ebx, %ebx
+ movl 52(%esp), %esi
+ subl (%esi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 4(%esi), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edi), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 12(%edi), %ecx
+ sbbl 12(%esi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 16(%edi), %eax
+ sbbl 16(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 20(%edi), %ebp
+ sbbl 20(%esi), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 24(%edi), %edi
+ sbbl 24(%esi), %edi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 44(%esp), %ebx
+ movl 16(%esp), %esi # 4-byte Reload
+ movl %esi, (%ebx)
+ movl 20(%esp), %esi # 4-byte Reload
+ movl %esi, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl %ecx, 12(%ebx)
+ movl %eax, 16(%ebx)
+ movl %ebp, 20(%ebx)
+ movl %edi, 24(%ebx)
+ je .LBB106_2
+# BB#1: # %carry
+ movl 56(%esp), %ebp
+ movl 16(%esp), %ecx # 4-byte Reload
+ addl (%ebp), %ecx
+ movl %ecx, (%ebx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 4(%ebp), %edx
+ movl %edx, 4(%ebx)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 8(%ebp), %ecx
+ movl 12(%ebp), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%ebp), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl %ecx, 16(%ebx)
+ movl 20(%ebp), %eax
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 24(%ebp), %eax
+ adcl %edi, %eax
+ movl %eax, 24(%ebx)
+.LBB106_2: # %nocarry
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end106:
+ .size mcl_fp_sub7Lbmi2, .Lfunc_end106-mcl_fp_sub7Lbmi2
+
+ .globl mcl_fp_subNF7Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF7Lbmi2,@function
+mcl_fp_subNF7Lbmi2: # @mcl_fp_subNF7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edx
+ movl 60(%esp), %ecx
+ subl (%ecx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ sbbl 4(%ecx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 24(%eax), %edx
+ movl 20(%eax), %esi
+ movl 16(%eax), %edi
+ movl 12(%eax), %ebx
+ movl 8(%eax), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ sbbl 24(%ecx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ sarl $31, %ecx
+ movl %ecx, %eax
+ shldl $1, %edx, %eax
+ movl 64(%esp), %edx
+ andl (%edx), %eax
+ movl 24(%edx), %esi
+ andl %ecx, %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 20(%edx), %ebx
+ andl %ecx, %ebx
+ movl 16(%edx), %edi
+ andl %ecx, %edi
+ movl 12(%edx), %esi
+ andl %ecx, %esi
+ movl 64(%esp), %edx
+ movl 8(%edx), %edx
+ andl %ecx, %edx
+ movl 64(%esp), %ebp
+ andl 4(%ebp), %ecx
+ addl 20(%esp), %eax # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ebp
+ movl %eax, (%ebp)
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, %eax
+ movl %ecx, 4(%eax)
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 8(%eax)
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 12(%eax)
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 16(%eax)
+ movl %ebx, 20(%eax)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%eax)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end107:
+ .size mcl_fp_subNF7Lbmi2, .Lfunc_end107-mcl_fp_subNF7Lbmi2
+
+ .globl mcl_fpDbl_add7Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add7Lbmi2,@function
+mcl_fpDbl_add7Lbmi2: # @mcl_fpDbl_add7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl 72(%esp), %esi
+ movl 68(%esp), %edx
+ movl 12(%edx), %edi
+ movl 16(%edx), %ecx
+ movl 8(%esi), %eax
+ movl (%esi), %ebx
+ addl (%edx), %ebx
+ movl 64(%esp), %ebp
+ movl %ebx, (%ebp)
+ movl 4(%esi), %ebx
+ adcl 4(%edx), %ebx
+ adcl 8(%edx), %eax
+ adcl 12(%esi), %edi
+ adcl 16(%esi), %ecx
+ movl %ebx, 4(%ebp)
+ movl %esi, %ebx
+ movl 36(%ebx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl %eax, 8(%ebp)
+ movl 20(%ebx), %eax
+ movl %edi, 12(%ebp)
+ movl 20(%edx), %edi
+ adcl %eax, %edi
+ movl 24(%ebx), %eax
+ movl %ecx, 16(%ebp)
+ movl 24(%edx), %ecx
+ adcl %eax, %ecx
+ movl 28(%ebx), %eax
+ movl %edi, 20(%ebp)
+ movl 28(%edx), %edi
+ adcl %eax, %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 32(%ebx), %eax
+ movl %ecx, 24(%ebp)
+ movl 32(%edx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%edx), %esi
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl 40(%ebx), %ecx
+ movl 40(%edx), %eax
+ adcl %ecx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%ebx), %ebp
+ movl 44(%edx), %ecx
+ adcl %ebp, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 48(%ebx), %ebp
+ movl %ebx, %eax
+ movl 48(%edx), %ebx
+ adcl %ebp, %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 52(%eax), %eax
+ movl 52(%edx), %ebp
+ adcl %eax, %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 76(%esp), %eax
+ subl (%eax), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ sbbl 4(%eax), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ movl 76(%esp), %edi
+ sbbl 8(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ sbbl 12(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 20(%edi), %ebx
+ sbbl 24(%edi), %ebp
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB108_2
+# BB#1:
+ movl %ebp, 32(%esp) # 4-byte Spill
+.LBB108_2:
+ testb %dl, %dl
+ movl 20(%esp), %ecx # 4-byte Reload
+ jne .LBB108_4
+# BB#3:
+ movl (%esp), %esi # 4-byte Reload
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB108_4:
+ movl 64(%esp), %eax
+ movl %ecx, 28(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl %esi, 36(%eax)
+ movl 24(%esp), %edx # 4-byte Reload
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB108_6
+# BB#5:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB108_6:
+ movl %ecx, 40(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ jne .LBB108_8
+# BB#7:
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB108_8:
+ movl %edx, 44(%eax)
+ jne .LBB108_10
+# BB#9:
+ movl %ebx, %ecx
+.LBB108_10:
+ movl %ecx, 48(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end108:
+ .size mcl_fpDbl_add7Lbmi2, .Lfunc_end108-mcl_fpDbl_add7Lbmi2
+
+ .globl mcl_fpDbl_sub7Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub7Lbmi2,@function
+mcl_fpDbl_sub7Lbmi2: # @mcl_fpDbl_sub7Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %edx
+ movl 60(%esp), %edi
+ subl (%edi), %eax
+ sbbl 4(%edi), %edx
+ movl 8(%esi), %ebx
+ sbbl 8(%edi), %ebx
+ movl 52(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%esi), %edx
+ sbbl 16(%edi), %edx
+ movl %ebx, 8(%ecx)
+ movl 20(%edi), %ebx
+ movl %eax, 12(%ecx)
+ movl 20(%esi), %eax
+ sbbl %ebx, %eax
+ movl 24(%edi), %ebx
+ movl %edx, 16(%ecx)
+ movl 24(%esi), %edx
+ sbbl %ebx, %edx
+ movl 28(%edi), %ebx
+ movl %eax, 20(%ecx)
+ movl 28(%esi), %eax
+ sbbl %ebx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 32(%edi), %eax
+ movl %edx, 24(%ecx)
+ movl 32(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 36(%edi), %eax
+ movl 36(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 40(%edi), %eax
+ movl 40(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 44(%edi), %eax
+ movl 44(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%edi), %eax
+ movl 48(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 52(%edi), %eax
+ movl 52(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 64(%esp), %esi
+ jne .LBB109_1
+# BB#2:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB109_3
+.LBB109_1:
+ movl 24(%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+.LBB109_3:
+ testb %al, %al
+ jne .LBB109_4
+# BB#5:
+ movl $0, %edi
+ movl $0, %eax
+ jmp .LBB109_6
+.LBB109_4:
+ movl (%esi), %eax
+ movl 4(%esi), %edi
+.LBB109_6:
+ jne .LBB109_7
+# BB#8:
+ movl $0, %ebx
+ jmp .LBB109_9
+.LBB109_7:
+ movl 20(%esi), %ebx
+.LBB109_9:
+ jne .LBB109_10
+# BB#11:
+ movl $0, %ebp
+ jmp .LBB109_12
+.LBB109_10:
+ movl 16(%esi), %ebp
+.LBB109_12:
+ jne .LBB109_13
+# BB#14:
+ movl $0, %edx
+ jmp .LBB109_15
+.LBB109_13:
+ movl 12(%esi), %edx
+.LBB109_15:
+ jne .LBB109_16
+# BB#17:
+ xorl %esi, %esi
+ jmp .LBB109_18
+.LBB109_16:
+ movl 8(%esi), %esi
+.LBB109_18:
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 32(%ecx)
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 36(%ecx)
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, 40(%ecx)
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 44(%ecx)
+ movl %ebx, 48(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end109:
+ .size mcl_fpDbl_sub7Lbmi2, .Lfunc_end109-mcl_fpDbl_sub7Lbmi2
+
+ .align 16, 0x90
+ .type .LmulPv256x32,@function
+.LmulPv256x32: # @mulPv256x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl %edx, %eax
+ movl 40(%esp), %edx
+ mulxl 4(%eax), %edi, %esi
+ mulxl (%eax), %ebp, %ebx
+ movl %ebp, 16(%esp) # 4-byte Spill
+ addl %edi, %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ mulxl 8(%eax), %edi, %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ mulxl 12(%eax), %ebx, %esi
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ mulxl 16(%eax), %edi, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ movl %ebp, %edx
+ mulxl 20(%eax), %esi, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %ebp, %edx
+ mulxl 24(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl %ebx, 12(%ecx)
+ movl %edi, 16(%ecx)
+ movl %esi, 20(%ecx)
+ movl %edx, 24(%ecx)
+ movl 40(%esp), %edx
+ mulxl 28(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ adcl $0, %edx
+ movl %edx, 32(%ecx)
+ movl %ecx, %eax
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end110:
+ .size .LmulPv256x32, .Lfunc_end110-.LmulPv256x32
+
+ .globl mcl_fp_mulUnitPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre8Lbmi2,@function
+mcl_fp_mulUnitPre8Lbmi2: # @mcl_fp_mulUnitPre8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ calll .L111$pb
+.L111$pb:
+ popl %ebx
+.Ltmp2:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp2-.L111$pb), %ebx
+ movl 88(%esp), %eax
+ movl %eax, (%esp)
+ leal 24(%esp), %ecx
+ movl 84(%esp), %edx
+ calll .LmulPv256x32
+ movl 56(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 48(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %esi
+ movl 40(%esp), %edi
+ movl 36(%esp), %ebx
+ movl 32(%esp), %ebp
+ movl 24(%esp), %edx
+ movl 28(%esp), %ecx
+ movl 80(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %ebp, 8(%eax)
+ movl %ebx, 12(%eax)
+ movl %edi, 16(%eax)
+ movl %esi, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end111:
+ .size mcl_fp_mulUnitPre8Lbmi2, .Lfunc_end111-mcl_fp_mulUnitPre8Lbmi2
+
+ .globl mcl_fpDbl_mulPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre8Lbmi2,@function
+mcl_fpDbl_mulPre8Lbmi2: # @mcl_fpDbl_mulPre8Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $156, %esp
+ calll .L112$pb
+.L112$pb:
+ popl %ebx
+.Ltmp3:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp3-.L112$pb), %ebx
+ movl %ebx, -96(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl %esi, 8(%esp)
+ movl 12(%ebp), %edi
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre4Lbmi2@PLT
+ leal 16(%esi), %eax
+ movl %eax, 8(%esp)
+ leal 16(%edi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 32(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre4Lbmi2@PLT
+ movl 24(%edi), %esi
+ movl (%edi), %ebx
+ movl 4(%edi), %eax
+ addl 16(%edi), %ebx
+ movl %ebx, -120(%ebp) # 4-byte Spill
+ adcl 20(%edi), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ adcl 8(%edi), %esi
+ movl %esi, -108(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -80(%ebp) # 4-byte Spill
+ movl 16(%ebp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ addl 16(%edi), %eax
+ adcl 20(%edi), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ movl 24(%edi), %edx
+ adcl 8(%edi), %edx
+ movl 28(%edi), %ecx
+ adcl 12(%edi), %ecx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -128(%ebp) # 4-byte Spill
+ jb .LBB112_2
+# BB#1:
+ xorl %esi, %esi
+ xorl %ebx, %ebx
+.LBB112_2:
+ movl %ebx, -112(%ebp) # 4-byte Spill
+ movl %esi, -104(%ebp) # 4-byte Spill
+ movl 12(%ebp), %esi
+ movl 28(%esi), %edi
+ movl -80(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 12(%esi), %edi
+ movl %edi, -116(%ebp) # 4-byte Spill
+ movl %ecx, -84(%ebp) # 4-byte Spill
+ movl %edx, %edi
+ movl -124(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -80(%ebp) # 4-byte Spill
+ movl %eax, -92(%ebp) # 4-byte Spill
+ jb .LBB112_4
+# BB#3:
+ movl $0, -84(%ebp) # 4-byte Folded Spill
+ movl $0, %edi
+ movl $0, -80(%ebp) # 4-byte Folded Spill
+ movl $0, -92(%ebp) # 4-byte Folded Spill
+.LBB112_4:
+ movl %edi, -88(%ebp) # 4-byte Spill
+ movl -120(%ebp), %esi # 4-byte Reload
+ movl %esi, -60(%ebp)
+ movl -100(%ebp), %edi # 4-byte Reload
+ movl %edi, -56(%ebp)
+ movl -108(%ebp), %esi # 4-byte Reload
+ movl %esi, -52(%ebp)
+ movl %eax, -76(%ebp)
+ movl %ebx, -72(%ebp)
+ movl %edx, -68(%ebp)
+ movl %ecx, -64(%ebp)
+ sbbl %edx, %edx
+ movl -116(%ebp), %esi # 4-byte Reload
+ movl %esi, -48(%ebp)
+ movl -128(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB112_6
+# BB#5:
+ movl $0, %esi
+ movl $0, %edi
+.LBB112_6:
+ sbbl %eax, %eax
+ leal -76(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -60(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -44(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl %edi, %eax
+ movl -92(%ebp), %edi # 4-byte Reload
+ addl -112(%ebp), %edi # 4-byte Folded Reload
+ adcl %eax, -80(%ebp) # 4-byte Folded Spill
+ movl -104(%ebp), %eax # 4-byte Reload
+ adcl %eax, -88(%ebp) # 4-byte Folded Spill
+ adcl %esi, -84(%ebp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -92(%ebp) # 4-byte Spill
+ movl -96(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre4Lbmi2@PLT
+ addl -28(%ebp), %edi
+ movl -80(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -80(%ebp) # 4-byte Spill
+ movl -88(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -88(%ebp) # 4-byte Spill
+ movl -84(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -84(%ebp) # 4-byte Spill
+ adcl %esi, -92(%ebp) # 4-byte Folded Spill
+ movl -44(%ebp), %eax
+ movl 8(%ebp), %esi
+ subl (%esi), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ebx
+ sbbl 4(%esi), %ebx
+ movl -36(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl -32(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl 16(%esi), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl 20(%esi), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ sbbl %eax, -80(%ebp) # 4-byte Folded Spill
+ movl 24(%esi), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ sbbl %eax, -88(%ebp) # 4-byte Folded Spill
+ movl 28(%esi), %eax
+ movl %eax, -108(%ebp) # 4-byte Spill
+ sbbl %eax, -84(%ebp) # 4-byte Folded Spill
+ sbbl $0, -92(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %ecx
+ movl %ecx, -132(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ subl %ecx, %eax
+ movl 36(%esi), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 40(%esi), %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 48(%esi), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 52(%esi), %ecx
+ movl %ecx, -116(%ebp) # 4-byte Spill
+ sbbl %ecx, -80(%ebp) # 4-byte Folded Spill
+ movl 56(%esi), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ sbbl %ecx, -88(%ebp) # 4-byte Folded Spill
+ movl 60(%esi), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ sbbl %ecx, -84(%ebp) # 4-byte Folded Spill
+ sbbl $0, -92(%ebp) # 4-byte Folded Spill
+ addl -100(%ebp), %eax # 4-byte Folded Reload
+ adcl -112(%ebp), %ebx # 4-byte Folded Reload
+ movl %eax, 16(%esi)
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -104(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%esi)
+ adcl -108(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 24(%esi)
+ adcl -132(%ebp), %edi # 4-byte Folded Reload
+ movl %edx, 28(%esi)
+ movl -80(%ebp), %eax # 4-byte Reload
+ adcl -136(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 32(%esi)
+ movl -88(%ebp), %ecx # 4-byte Reload
+ adcl -128(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ movl -84(%ebp), %eax # 4-byte Reload
+ adcl -140(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%esi)
+ movl -92(%ebp), %ecx # 4-byte Reload
+ adcl -144(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%esi)
+ movl %ecx, 48(%esi)
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 52(%esi)
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 56(%esi)
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 60(%esi)
+ addl $156, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end112:
+ .size mcl_fpDbl_mulPre8Lbmi2, .Lfunc_end112-mcl_fpDbl_mulPre8Lbmi2
+
+ .globl mcl_fpDbl_sqrPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre8Lbmi2,@function
+mcl_fpDbl_sqrPre8Lbmi2: # @mcl_fpDbl_sqrPre8Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $156, %esp
+ calll .L113$pb
+.L113$pb:
+ popl %ebx
+.Ltmp4:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp4-.L113$pb), %ebx
+ movl %ebx, -96(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre4Lbmi2@PLT
+ leal 16(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 32(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre4Lbmi2@PLT
+ movl (%edi), %esi
+ movl 4(%edi), %ecx
+ addl 16(%edi), %esi
+ movl %esi, -108(%ebp) # 4-byte Spill
+ adcl 20(%edi), %ecx
+ seto %al
+ lahf
+ movl %eax, %edx
+ addl %esi, %esi
+ movl %esi, -84(%ebp) # 4-byte Spill
+ movl %ecx, %esi
+ adcl %esi, %esi
+ movl %esi, -80(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %esi
+ popl %eax
+ movl %esi, -88(%ebp) # 4-byte Spill
+ movl 24(%edi), %esi
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 8(%edi), %esi
+ movl 28(%edi), %edx
+ adcl 12(%edi), %edx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -100(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %ebx
+ sbbl %edi, %edi
+ movl %edi, -92(%ebp) # 4-byte Spill
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB113_2
+# BB#1:
+ movl $0, -80(%ebp) # 4-byte Folded Spill
+ movl $0, -84(%ebp) # 4-byte Folded Spill
+.LBB113_2:
+ movl %esi, %ebx
+ movl -88(%ebp), %edi # 4-byte Reload
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ adcl %ebx, %ebx
+ movl %edx, %edi
+ adcl %edi, %edi
+ movl -104(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB113_4
+# BB#3:
+ xorl %edi, %edi
+ xorl %ebx, %ebx
+.LBB113_4:
+ movl %ebx, -88(%ebp) # 4-byte Spill
+ movl -108(%ebp), %eax # 4-byte Reload
+ movl %eax, -60(%ebp)
+ movl %ecx, -56(%ebp)
+ movl %esi, -52(%ebp)
+ movl %edx, -48(%ebp)
+ movl %eax, -76(%ebp)
+ movl %ecx, -72(%ebp)
+ movl %esi, -68(%ebp)
+ movl %edx, -64(%ebp)
+ movl -100(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB113_5
+# BB#6:
+ movl $0, -100(%ebp) # 4-byte Folded Spill
+ jmp .LBB113_7
+.LBB113_5:
+ shrl $31, %edx
+ movl %edx, -100(%ebp) # 4-byte Spill
+.LBB113_7:
+ leal -76(%ebp), %eax
+ movl %eax, 8(%esp)
+ leal -60(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -44(%ebp), %eax
+ movl %eax, (%esp)
+ movl -92(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -96(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre4Lbmi2@PLT
+ movl -84(%ebp), %eax # 4-byte Reload
+ addl -28(%ebp), %eax
+ movl %eax, -84(%ebp) # 4-byte Spill
+ movl -80(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -80(%ebp) # 4-byte Spill
+ movl -88(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -88(%ebp) # 4-byte Spill
+ adcl -16(%ebp), %edi
+ movl %edi, -92(%ebp) # 4-byte Spill
+ adcl -100(%ebp), %esi # 4-byte Folded Reload
+ movl -44(%ebp), %eax
+ movl 8(%ebp), %edi
+ subl (%edi), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ebx
+ sbbl 4(%edi), %ebx
+ movl -36(%ebp), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl -32(%ebp), %edx
+ sbbl 12(%edi), %edx
+ movl 16(%edi), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ sbbl %eax, -84(%ebp) # 4-byte Folded Spill
+ movl 20(%edi), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ sbbl %eax, -80(%ebp) # 4-byte Folded Spill
+ movl 24(%edi), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ sbbl %eax, -88(%ebp) # 4-byte Folded Spill
+ movl 28(%edi), %eax
+ movl %eax, -108(%ebp) # 4-byte Spill
+ sbbl %eax, -92(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ movl 32(%edi), %ecx
+ movl %ecx, -132(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ subl %ecx, %eax
+ movl 36(%edi), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 40(%edi), %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 44(%edi), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 48(%edi), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ sbbl %ecx, -84(%ebp) # 4-byte Folded Spill
+ movl 52(%edi), %ecx
+ movl %ecx, -116(%ebp) # 4-byte Spill
+ sbbl %ecx, -80(%ebp) # 4-byte Folded Spill
+ movl 56(%edi), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ sbbl %ecx, -88(%ebp) # 4-byte Folded Spill
+ movl 60(%edi), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ sbbl %ecx, -92(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ addl -100(%ebp), %eax # 4-byte Folded Reload
+ adcl -112(%ebp), %ebx # 4-byte Folded Reload
+ movl %eax, 16(%edi)
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -104(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%edi)
+ adcl -108(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 24(%edi)
+ movl -84(%ebp), %eax # 4-byte Reload
+ adcl -132(%ebp), %eax # 4-byte Folded Reload
+ movl %edx, 28(%edi)
+ movl -80(%ebp), %ecx # 4-byte Reload
+ adcl -136(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%edi)
+ movl -88(%ebp), %eax # 4-byte Reload
+ adcl -128(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%edi)
+ movl -92(%ebp), %ecx # 4-byte Reload
+ adcl -140(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%edi)
+ adcl -144(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 44(%edi)
+ movl %esi, 48(%edi)
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 52(%edi)
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 56(%edi)
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 60(%edi)
+ addl $156, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end113:
+ .size mcl_fpDbl_sqrPre8Lbmi2, .Lfunc_end113-mcl_fpDbl_sqrPre8Lbmi2
+
+ .globl mcl_fp_mont8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont8Lbmi2,@function
+mcl_fp_mont8Lbmi2: # @mcl_fp_mont8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $700, %esp # imm = 0x2BC
+ calll .L114$pb
+.L114$pb:
+ popl %ebx
+.Ltmp5:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp5-.L114$pb), %ebx
+ movl 732(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 664(%esp), %ebp
+ movl 668(%esp), %edi
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 696(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 688(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 684(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 676(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 672(%esp), %esi
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 624(%esp), %ebp
+ adcl 628(%esp), %edi
+ adcl 632(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ sbbl %eax, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 584(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 60(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 584(%esp), %edi
+ adcl 588(%esp), %esi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 592(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 596(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 600(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 604(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 608(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 612(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 616(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %edi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 732(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ andl $1, %ebp
+ addl 544(%esp), %edi
+ adcl 548(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 568(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 728(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ addl 504(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 524(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 536(%esp), %ebp
+ sbbl %edi, %edi
+ movl %esi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 464(%esp), %esi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 480(%esp), %edi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 484(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 488(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 496(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 724(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 424(%esp), %ecx
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 428(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 436(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 444(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 448(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 384(%esp), %esi
+ adcl 388(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 392(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 396(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 400(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 404(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 408(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 412(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 416(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 28(%esp), %ecx # 4-byte Reload
+ addl 344(%esp), %ecx
+ adcl 348(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 352(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 364(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 372(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %ebp, %eax
+ andl $1, %eax
+ addl 304(%esp), %edi
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 308(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 312(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 316(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 328(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 336(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 264(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 272(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 276(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 284(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 224(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 228(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 232(%esp), %esi
+ adcl 236(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 240(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 244(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 248(%esp), %ebp
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 256(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 184(%esp), %ecx
+ adcl 188(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 196(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 204(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 144(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 152(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 160(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 172(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 176(%esp), %ebp
+ adcl $0, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 104(%esp), %ecx
+ adcl 108(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 116(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 128(%esp), %edi
+ adcl 132(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 24(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %ebp
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ andl $1, %esi
+ addl 64(%esp), %ebp
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebx
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 88(%esp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %eax, %edx
+ movl 732(%esp), %ebp
+ subl (%ebp), %edx
+ movl %ecx, %eax
+ sbbl 4(%ebp), %eax
+ movl %ebx, %ecx
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ sbbl 20(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ sbbl $0, %esi
+ andl $1, %esi
+ movl %esi, %ecx
+ jne .LBB114_2
+# BB#1:
+ movl %edx, %ebp
+.LBB114_2:
+ movl 720(%esp), %edx
+ movl %ebp, (%edx)
+ testb %cl, %cl
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB114_4
+# BB#3:
+ movl %eax, %ebp
+.LBB114_4:
+ movl %ebp, 4(%edx)
+ jne .LBB114_6
+# BB#5:
+ movl 12(%esp), %ebx # 4-byte Reload
+.LBB114_6:
+ movl %ebx, 8(%edx)
+ movl 28(%esp), %eax # 4-byte Reload
+ jne .LBB114_8
+# BB#7:
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+.LBB114_8:
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%edx)
+ movl 40(%esp), %edi # 4-byte Reload
+ jne .LBB114_10
+# BB#9:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB114_10:
+ movl %edi, 16(%edx)
+ jne .LBB114_12
+# BB#11:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB114_12:
+ movl %eax, 20(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ jne .LBB114_14
+# BB#13:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB114_14:
+ movl %eax, 24(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ jne .LBB114_16
+# BB#15:
+ movl 52(%esp), %eax # 4-byte Reload
+.LBB114_16:
+ movl %eax, 28(%edx)
+ addl $700, %esp # imm = 0x2BC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end114:
+ .size mcl_fp_mont8Lbmi2, .Lfunc_end114-mcl_fp_mont8Lbmi2
+
+ .globl mcl_fp_montNF8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF8Lbmi2,@function
+mcl_fp_montNF8Lbmi2: # @mcl_fp_montNF8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $700, %esp # imm = 0x2BC
+ calll .L115$pb
+.L115$pb:
+ popl %ebx
+.Ltmp6:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp6-.L115$pb), %ebx
+ movl 732(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 664(%esp), %ebp
+ movl 668(%esp), %edi
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 696(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 688(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 684(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 676(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 672(%esp), %esi
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 624(%esp), %ebp
+ adcl 628(%esp), %edi
+ adcl 632(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 640(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 584(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 616(%esp), %ecx
+ addl 584(%esp), %edi
+ adcl 588(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 596(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 604(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 732(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ addl 544(%esp), %edi
+ adcl 548(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 564(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 572(%esp), %edi
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 576(%esp), %ebp
+ movl 728(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 536(%esp), %ecx
+ addl 504(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 528(%esp), %edi
+ adcl 532(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 464(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 472(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 484(%esp), %esi
+ adcl 488(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 496(%esp), %edi
+ movl 728(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 456(%esp), %eax
+ movl 44(%esp), %edx # 4-byte Reload
+ addl 424(%esp), %edx
+ adcl 428(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 432(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 436(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 440(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 448(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 452(%esp), %edi
+ movl %edi, %ebp
+ movl %eax, %edi
+ adcl $0, %edi
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 384(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 396(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 412(%esp), %ebp
+ adcl 416(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 376(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 344(%esp), %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 352(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 360(%esp), %esi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 364(%esp), %edi
+ adcl 368(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 304(%esp), %ebp
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 308(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 320(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 324(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 328(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 336(%esp), %edi
+ movl 728(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 724(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ movl 296(%esp), %edx
+ movl %ebp, %ecx
+ addl 264(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 280(%esp), %ebp
+ adcl 284(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl %edx, %edi
+ adcl $0, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 224(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 236(%esp), %esi
+ adcl 240(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 256(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 216(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 184(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 192(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 196(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 144(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 156(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 160(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 176(%esp), %ebp
+ movl 728(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 136(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 104(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 116(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ adcl $0, %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 64(%esp), %esi
+ movl 32(%esp), %esi # 4-byte Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 68(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 76(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 92(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 96(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 732(%esp), %eax
+ subl (%eax), %edx
+ sbbl 4(%eax), %ecx
+ sbbl 8(%eax), %esi
+ sbbl 12(%eax), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ sbbl 16(%eax), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %ebx # 4-byte Reload
+ sbbl 20(%eax), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ sbbl 24(%eax), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ sbbl 28(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ testl %edi, %edi
+ js .LBB115_2
+# BB#1:
+ movl %edx, 56(%esp) # 4-byte Spill
+.LBB115_2:
+ movl 720(%esp), %edx
+ movl 56(%esp), %eax # 4-byte Reload
+ movl %eax, (%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB115_4
+# BB#3:
+ movl %ecx, %eax
+.LBB115_4:
+ movl %eax, 4(%edx)
+ js .LBB115_6
+# BB#5:
+ movl %esi, 32(%esp) # 4-byte Spill
+.LBB115_6:
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%edx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ js .LBB115_8
+# BB#7:
+ movl 12(%esp), %esi # 4-byte Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+.LBB115_8:
+ movl 44(%esp), %esi # 4-byte Reload
+ movl %esi, 12(%edx)
+ js .LBB115_10
+# BB#9:
+ movl 16(%esp), %edi # 4-byte Reload
+.LBB115_10:
+ movl %edi, 16(%edx)
+ js .LBB115_12
+# BB#11:
+ movl 20(%esp), %ebp # 4-byte Reload
+.LBB115_12:
+ movl %ebp, 20(%edx)
+ js .LBB115_14
+# BB#13:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB115_14:
+ movl %eax, 24(%edx)
+ js .LBB115_16
+# BB#15:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB115_16:
+ movl %ecx, 28(%edx)
+ addl $700, %esp # imm = 0x2BC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end115:
+ .size mcl_fp_montNF8Lbmi2, .Lfunc_end115-mcl_fp_montNF8Lbmi2
+
+ .globl mcl_fp_montRed8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed8Lbmi2,@function
+mcl_fp_montRed8Lbmi2: # @mcl_fp_montRed8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $428, %esp # imm = 0x1AC
+ calll .L116$pb
+.L116$pb:
+ popl %ebx
+.Ltmp7:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp7-.L116$pb), %ebx
+ movl 456(%esp), %edx
+ movl -4(%edx), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl (%eax), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 4(%eax), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ imull %edi, %ecx
+ movl 60(%eax), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 56(%eax), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 52(%eax), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 48(%eax), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 44(%eax), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 40(%eax), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 36(%eax), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 32(%eax), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 24(%eax), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 20(%eax), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 16(%eax), %ebp
+ movl 12(%eax), %edi
+ movl 8(%eax), %esi
+ movl (%edx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 16(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 12(%edx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 8(%edx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 4(%edx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, (%esp)
+ leal 392(%esp), %ecx
+ calll .LmulPv256x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 392(%esp), %eax
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 396(%esp), %ecx
+ adcl 400(%esp), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 404(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 408(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ movl 64(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 352(%esp), %edi
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 356(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 360(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 364(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 368(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 376(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 380(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 384(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 312(%esp), %edi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 316(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 272(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 276(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 232(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 236(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 252(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 192(%esp), %edi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 196(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 204(%esp), %edi
+ adcl 208(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 152(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 156(%esp), %ecx
+ adcl 160(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 168(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 172(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 180(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 112(%esp), %esi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 116(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 128(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %eax, %esi
+ adcl 136(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %ecx, %edx
+ subl 24(%esp), %edx # 4-byte Folded Reload
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ movl 104(%esp), %ebp # 4-byte Reload
+ sbbl 28(%esp), %ebp # 4-byte Folded Reload
+ sbbl 32(%esp), %ebx # 4-byte Folded Reload
+ sbbl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ sbbl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ sbbl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ sbbl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 96(%esp) # 4-byte Spill
+ sbbl $0, %edi
+ andl $1, %edi
+ jne .LBB116_2
+# BB#1:
+ movl %edx, %ecx
+.LBB116_2:
+ movl 448(%esp), %edx
+ movl %ecx, (%edx)
+ movl %edi, %ecx
+ testb %cl, %cl
+ jne .LBB116_4
+# BB#3:
+ movl %eax, 108(%esp) # 4-byte Spill
+.LBB116_4:
+ movl 108(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB116_6
+# BB#5:
+ movl %ebp, %eax
+.LBB116_6:
+ movl %eax, 8(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ jne .LBB116_8
+# BB#7:
+ movl %ebx, %ebp
+.LBB116_8:
+ movl %ebp, 12(%edx)
+ movl 100(%esp), %ebx # 4-byte Reload
+ jne .LBB116_10
+# BB#9:
+ movl 68(%esp), %ebx # 4-byte Reload
+.LBB116_10:
+ movl %ebx, 16(%edx)
+ movl 80(%esp), %edi # 4-byte Reload
+ jne .LBB116_12
+# BB#11:
+ movl 72(%esp), %edi # 4-byte Reload
+.LBB116_12:
+ movl %edi, 20(%edx)
+ movl 88(%esp), %esi # 4-byte Reload
+ jne .LBB116_14
+# BB#13:
+ movl 92(%esp), %esi # 4-byte Reload
+.LBB116_14:
+ movl %esi, 24(%edx)
+ jne .LBB116_16
+# BB#15:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB116_16:
+ movl %eax, 28(%edx)
+ addl $428, %esp # imm = 0x1AC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end116:
+ .size mcl_fp_montRed8Lbmi2, .Lfunc_end116-mcl_fp_montRed8Lbmi2
+
+ .globl mcl_fp_addPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre8Lbmi2,@function
+mcl_fp_addPre8Lbmi2: # @mcl_fp_addPre8Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 20(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %edi
+ adcl 8(%esi), %edi
+ movl 16(%esp), %ebx
+ movl %ecx, (%ebx)
+ movl 12(%esi), %ecx
+ movl %edx, 4(%ebx)
+ movl 16(%esi), %edx
+ adcl 12(%eax), %ecx
+ adcl 16(%eax), %edx
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %ecx, 12(%ebx)
+ movl 20(%esi), %ecx
+ adcl %edi, %ecx
+ movl 24(%eax), %edi
+ movl %edx, 16(%ebx)
+ movl 24(%esi), %edx
+ adcl %edi, %edx
+ movl %ecx, 20(%ebx)
+ movl %edx, 24(%ebx)
+ movl 28(%eax), %eax
+ movl 28(%esi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 28(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end117:
+ .size mcl_fp_addPre8Lbmi2, .Lfunc_end117-mcl_fp_addPre8Lbmi2
+
+ .globl mcl_fp_subPre8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre8Lbmi2,@function
+mcl_fp_subPre8Lbmi2: # @mcl_fp_subPre8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ xorl %eax, %eax
+ movl 28(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edi), %ebx
+ movl 20(%esp), %ebp
+ movl %edx, (%ebp)
+ movl 12(%ecx), %edx
+ sbbl 12(%edi), %edx
+ movl %esi, 4(%ebp)
+ movl 16(%ecx), %esi
+ sbbl 16(%edi), %esi
+ movl %ebx, 8(%ebp)
+ movl 20(%edi), %ebx
+ movl %edx, 12(%ebp)
+ movl 20(%ecx), %edx
+ sbbl %ebx, %edx
+ movl 24(%edi), %ebx
+ movl %esi, 16(%ebp)
+ movl 24(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edx, 20(%ebp)
+ movl %esi, 24(%ebp)
+ movl 28(%edi), %edx
+ movl 28(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 28(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end118:
+ .size mcl_fp_subPre8Lbmi2, .Lfunc_end118-mcl_fp_subPre8Lbmi2
+
+ .globl mcl_fp_shr1_8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_8Lbmi2,@function
+mcl_fp_shr1_8Lbmi2: # @mcl_fp_shr1_8Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %eax
+ shrdl $1, %eax, %ecx
+ movl %ecx, 24(%esi)
+ shrl %eax
+ movl %eax, 28(%esi)
+ popl %esi
+ retl
+.Lfunc_end119:
+ .size mcl_fp_shr1_8Lbmi2, .Lfunc_end119-mcl_fp_shr1_8Lbmi2
+
+ .globl mcl_fp_add8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add8Lbmi2,@function
+mcl_fp_add8Lbmi2: # @mcl_fp_add8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 44(%esp), %edx
+ addl (%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl 4(%edx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%edx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%edx), %esi
+ movl 16(%edx), %eax
+ adcl 12(%edi), %esi
+ adcl 16(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ adcl 20(%edi), %ecx
+ movl 24(%edx), %ebx
+ adcl 24(%edi), %ebx
+ movl 28(%edx), %edi
+ movl 48(%esp), %edx
+ adcl 28(%edx), %edi
+ movl 40(%esp), %edx
+ movl %ebp, (%edx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%edx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%edx)
+ movl %esi, 12(%edx)
+ movl %eax, 16(%edx)
+ movl %ecx, 20(%edx)
+ movl %ebx, 24(%edx)
+ movl %edi, 28(%edx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 52(%esp), %edx
+ movl 8(%esp), %ebp # 4-byte Reload
+ subl (%edx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl 52(%esp), %edx
+ sbbl 4(%edx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 52(%esp), %edx
+ sbbl 8(%edx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp
+ sbbl 12(%ebp), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 4(%esp), %edx # 4-byte Reload
+ sbbl 16(%ebp), %edx
+ movl %edx, %esi
+ sbbl 20(%ebp), %ecx
+ sbbl 24(%ebp), %ebx
+ sbbl 28(%ebp), %edi
+ sbbl $0, %eax
+ testb $1, %al
+ jne .LBB120_2
+# BB#1: # %nocarry
+ movl 8(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %ebp
+ movl %edx, (%ebp)
+ movl 16(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%ebp)
+ movl 12(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%ebp)
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebp)
+ movl %esi, 16(%ebp)
+ movl %ecx, 20(%ebp)
+ movl %ebx, 24(%ebp)
+ movl %edi, 28(%ebp)
+.LBB120_2: # %carry
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end120:
+ .size mcl_fp_add8Lbmi2, .Lfunc_end120-mcl_fp_add8Lbmi2
+
+ .globl mcl_fp_addNF8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF8Lbmi2,@function
+mcl_fp_addNF8Lbmi2: # @mcl_fp_addNF8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 84(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edi
+ movl 80(%esp), %ebx
+ addl (%ebx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 4(%ebx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 28(%eax), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 20(%eax), %ebp
+ movl 16(%eax), %esi
+ movl 12(%eax), %edx
+ movl 8(%eax), %ecx
+ adcl 8(%ebx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 12(%ebx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 16(%ebx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 20(%ebx), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 24(%ebx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 28(%ebx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, %eax
+ subl (%ebx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 4(%ebx), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ sbbl 8(%ebx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%ebx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 20(%ebx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ sbbl 24(%ebx), %ebp
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 28(%ebx), %esi
+ testl %esi, %esi
+ js .LBB121_2
+# BB#1:
+ movl (%esp), %eax # 4-byte Reload
+.LBB121_2:
+ movl 76(%esp), %ebx
+ movl %eax, (%ebx)
+ movl 32(%esp), %eax # 4-byte Reload
+ js .LBB121_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB121_4:
+ movl %eax, 4(%ebx)
+ movl 40(%esp), %edx # 4-byte Reload
+ movl 28(%esp), %edi # 4-byte Reload
+ js .LBB121_6
+# BB#5:
+ movl 8(%esp), %edi # 4-byte Reload
+.LBB121_6:
+ movl %edi, 8(%ebx)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB121_8
+# BB#7:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB121_8:
+ movl %eax, 12(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB121_10
+# BB#9:
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB121_10:
+ movl %edx, 16(%ebx)
+ js .LBB121_12
+# BB#11:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB121_12:
+ movl %ecx, 20(%ebx)
+ js .LBB121_14
+# BB#13:
+ movl %ebp, %eax
+.LBB121_14:
+ movl %eax, 24(%ebx)
+ js .LBB121_16
+# BB#15:
+ movl %esi, %edi
+.LBB121_16:
+ movl %edi, 28(%ebx)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end121:
+ .size mcl_fp_addNF8Lbmi2, .Lfunc_end121-mcl_fp_addNF8Lbmi2
+
+ .globl mcl_fp_sub8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub8Lbmi2,@function
+mcl_fp_sub8Lbmi2: # @mcl_fp_sub8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 52(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ xorl %ebx, %ebx
+ movl 56(%esp), %ebp
+ subl (%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 4(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 8(%esi), %edx
+ sbbl 8(%ebp), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 16(%esi), %ecx
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%esi), %edi
+ sbbl 24(%ebp), %edi
+ movl 28(%esi), %esi
+ sbbl 28(%ebp), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 48(%esp), %ebx
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ebx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%ebx)
+ movl %ecx, 16(%ebx)
+ movl %eax, 20(%ebx)
+ movl %edi, 24(%ebx)
+ movl %esi, 28(%ebx)
+ je .LBB122_2
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 60(%esp), %esi
+ movl 16(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ adcl 8(%esi), %ebp
+ movl 12(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl %eax, 20(%ebx)
+ movl 24(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 24(%ebx)
+ movl 28(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+.LBB122_2: # %nocarry
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end122:
+ .size mcl_fp_sub8Lbmi2, .Lfunc_end122-mcl_fp_sub8Lbmi2
+
+ .globl mcl_fp_subNF8Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF8Lbmi2,@function
+mcl_fp_subNF8Lbmi2: # @mcl_fp_subNF8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edx
+ movl 68(%esp), %ecx
+ subl (%ecx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ sbbl 4(%ecx), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 28(%eax), %edx
+ movl 24(%eax), %esi
+ movl 20(%eax), %edi
+ movl 16(%eax), %ebx
+ movl 12(%eax), %ebp
+ movl 8(%eax), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ sbbl 24(%ecx), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %edx, %edi
+ sbbl 28(%ecx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ sarl $31, %edi
+ movl 72(%esp), %ebp
+ movl 28(%ebp), %eax
+ andl %edi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%ebp), %eax
+ andl %edi, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 20(%ebp), %ebx
+ andl %edi, %ebx
+ movl 16(%ebp), %esi
+ andl %edi, %esi
+ movl 12(%ebp), %edx
+ andl %edi, %edx
+ movl 8(%ebp), %ecx
+ andl %edi, %ecx
+ movl 4(%ebp), %eax
+ andl %edi, %eax
+ andl (%ebp), %edi
+ addl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl 60(%esp), %ebp
+ movl %edi, (%ebp)
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 4(%ebp)
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 8(%ebp)
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 12(%ebp)
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 16(%ebp)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%ebp)
+ movl %eax, 24(%ebp)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ebp)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end123:
+ .size mcl_fp_subNF8Lbmi2, .Lfunc_end123-mcl_fp_subNF8Lbmi2
+
+ .globl mcl_fpDbl_add8Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add8Lbmi2,@function
+mcl_fpDbl_add8Lbmi2: # @mcl_fpDbl_add8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 84(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 80(%esp), %ebp
+ addl (%ebp), %esi
+ adcl 4(%ebp), %edx
+ movl 8(%ecx), %edi
+ adcl 8(%ebp), %edi
+ movl 12(%ebp), %ebx
+ movl 76(%esp), %eax
+ movl %esi, (%eax)
+ movl 16(%ebp), %esi
+ adcl 12(%ecx), %ebx
+ adcl 16(%ecx), %esi
+ movl %edx, 4(%eax)
+ movl 40(%ecx), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %edi, 8(%eax)
+ movl 20(%ecx), %edx
+ movl %ebx, 12(%eax)
+ movl 20(%ebp), %edi
+ adcl %edx, %edi
+ movl 24(%ecx), %edx
+ movl %esi, 16(%eax)
+ movl 24(%ebp), %esi
+ adcl %edx, %esi
+ movl 28(%ecx), %edx
+ movl %edi, 20(%eax)
+ movl 28(%ebp), %ebx
+ adcl %edx, %ebx
+ movl 32(%ecx), %edx
+ movl %esi, 24(%eax)
+ movl 32(%ebp), %esi
+ adcl %edx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 36(%ecx), %edx
+ movl %ebx, 28(%eax)
+ movl 36(%ebp), %ebx
+ adcl %edx, %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 40(%ebp), %eax
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl 44(%ebp), %edi
+ adcl %edx, %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl 48(%ebp), %eax
+ adcl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 52(%ecx), %edx
+ movl 52(%ebp), %esi
+ adcl %edx, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 56(%ecx), %edx
+ movl 56(%ebp), %eax
+ adcl %edx, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%ecx), %ecx
+ movl 60(%ebp), %ebp
+ adcl %ecx, %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ movl 88(%esp), %edx
+ subl (%edx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ sbbl 4(%eax), %ebx
+ movl %eax, %edx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ movl %edx, %ebx
+ sbbl 8(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl 24(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ sbbl 16(%ebx), %eax
+ sbbl 20(%ebx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ sbbl 24(%ebx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ sbbl 28(%ebx), %ebp
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB124_2
+# BB#1:
+ movl %eax, %edi
+.LBB124_2:
+ testb %cl, %cl
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB124_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB124_4:
+ movl 76(%esp), %eax
+ movl %ecx, 32(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl 32(%esp), %edx # 4-byte Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ movl 28(%esp), %ebx # 4-byte Reload
+ jne .LBB124_6
+# BB#5:
+ movl 4(%esp), %ebx # 4-byte Reload
+.LBB124_6:
+ movl %ebx, 36(%eax)
+ jne .LBB124_8
+# BB#7:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB124_8:
+ movl %esi, 40(%eax)
+ movl 36(%esp), %esi # 4-byte Reload
+ jne .LBB124_10
+# BB#9:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB124_10:
+ movl %edx, 44(%eax)
+ movl %edi, 48(%eax)
+ movl 52(%esp), %edx # 4-byte Reload
+ jne .LBB124_12
+# BB#11:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB124_12:
+ movl %esi, 52(%eax)
+ jne .LBB124_14
+# BB#13:
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB124_14:
+ movl %edx, 56(%eax)
+ jne .LBB124_16
+# BB#15:
+ movl %ebp, %ecx
+.LBB124_16:
+ movl %ecx, 60(%eax)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end124:
+ .size mcl_fpDbl_add8Lbmi2, .Lfunc_end124-mcl_fpDbl_add8Lbmi2
+
+ .globl mcl_fpDbl_sub8Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub8Lbmi2,@function
+mcl_fpDbl_sub8Lbmi2: # @mcl_fpDbl_sub8Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %edx
+ movl 68(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %edx
+ movl 8(%edi), %esi
+ sbbl 8(%ebx), %esi
+ movl 60(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edi), %eax
+ sbbl 12(%ebx), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%edi), %edx
+ sbbl 16(%ebx), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebx), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%edi), %eax
+ sbbl %esi, %eax
+ movl 24(%ebx), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%edi), %edx
+ sbbl %esi, %edx
+ movl 28(%ebx), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%edi), %eax
+ sbbl %esi, %eax
+ movl 32(%ebx), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%edi), %edx
+ sbbl %esi, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 36(%ebx), %edx
+ movl %eax, 28(%ecx)
+ movl 36(%edi), %eax
+ sbbl %edx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 40(%ebx), %eax
+ movl 40(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 44(%ebx), %eax
+ movl 44(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%ebx), %eax
+ movl 48(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 52(%ebx), %eax
+ movl 52(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 56(%ebx), %eax
+ movl 56(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 60(%ebx), %eax
+ movl 60(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 72(%esp), %ebx
+ jne .LBB125_1
+# BB#2:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB125_3
+.LBB125_1:
+ movl 28(%ebx), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+.LBB125_3:
+ testb %al, %al
+ jne .LBB125_4
+# BB#5:
+ movl $0, %ebp
+ movl $0, %eax
+ jmp .LBB125_6
+.LBB125_4:
+ movl (%ebx), %eax
+ movl 4(%ebx), %ebp
+.LBB125_6:
+ jne .LBB125_7
+# BB#8:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB125_9
+.LBB125_7:
+ movl 24(%ebx), %edx
+ movl %edx, (%esp) # 4-byte Spill
+.LBB125_9:
+ jne .LBB125_10
+# BB#11:
+ movl $0, %edx
+ jmp .LBB125_12
+.LBB125_10:
+ movl 20(%ebx), %edx
+.LBB125_12:
+ jne .LBB125_13
+# BB#14:
+ movl $0, %esi
+ jmp .LBB125_15
+.LBB125_13:
+ movl 16(%ebx), %esi
+.LBB125_15:
+ jne .LBB125_16
+# BB#17:
+ movl $0, %edi
+ jmp .LBB125_18
+.LBB125_16:
+ movl 12(%ebx), %edi
+.LBB125_18:
+ jne .LBB125_19
+# BB#20:
+ xorl %ebx, %ebx
+ jmp .LBB125_21
+.LBB125_19:
+ movl 8(%ebx), %ebx
+.LBB125_21:
+ addl 16(%esp), %eax # 4-byte Folded Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 36(%ecx)
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 40(%ecx)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 44(%ecx)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 52(%ecx)
+ movl %eax, 56(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%ecx)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end125:
+ .size mcl_fpDbl_sub8Lbmi2, .Lfunc_end125-mcl_fpDbl_sub8Lbmi2
+
+ .align 16, 0x90
+ .type .LmulPv288x32,@function
+.LmulPv288x32: # @mulPv288x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl %edx, %eax
+ movl 44(%esp), %edx
+ mulxl 4(%eax), %edi, %esi
+ mulxl (%eax), %ebp, %ebx
+ movl %ebp, 20(%esp) # 4-byte Spill
+ addl %edi, %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ mulxl 8(%eax), %edi, %ebx
+ adcl %esi, %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ mulxl 12(%eax), %esi, %edi
+ adcl %ebx, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ mulxl 16(%eax), %ebx, %esi
+ adcl %edi, %ebx
+ mulxl 20(%eax), %edi, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ movl %ebp, %edx
+ mulxl 24(%eax), %esi, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %ebp, %edx
+ mulxl 28(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%ecx)
+ movl %ebx, 16(%ecx)
+ movl %edi, 20(%ecx)
+ movl %esi, 24(%ecx)
+ movl %edx, 28(%ecx)
+ movl 44(%esp), %edx
+ mulxl 32(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ adcl $0, %edx
+ movl %edx, 36(%ecx)
+ movl %ecx, %eax
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end126:
+ .size .LmulPv288x32, .Lfunc_end126-.LmulPv288x32
+
+ .globl mcl_fp_mulUnitPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre9Lbmi2,@function
+mcl_fp_mulUnitPre9Lbmi2: # @mcl_fp_mulUnitPre9Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $76, %esp
+ calll .L127$pb
+.L127$pb:
+ popl %ebx
+.Ltmp8:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp8-.L127$pb), %ebx
+ movl 104(%esp), %eax
+ movl %eax, (%esp)
+ leal 32(%esp), %ecx
+ movl 100(%esp), %edx
+ calll .LmulPv288x32
+ movl 68(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edi
+ movl 48(%esp), %ebx
+ movl 44(%esp), %ebp
+ movl 40(%esp), %esi
+ movl 32(%esp), %edx
+ movl 36(%esp), %ecx
+ movl 96(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %ebp, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %edi, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ addl $76, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end127:
+ .size mcl_fp_mulUnitPre9Lbmi2, .Lfunc_end127-mcl_fp_mulUnitPre9Lbmi2
+
+ .globl mcl_fpDbl_mulPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre9Lbmi2,@function
+mcl_fpDbl_mulPre9Lbmi2: # @mcl_fpDbl_mulPre9Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $428, %esp # imm = 0x1AC
+ calll .L128$pb
+.L128$pb:
+ popl %esi
+.Ltmp9:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp9-.L128$pb), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 452(%esp), %edx
+ movl %edx, %ebp
+ movl %esi, %ebx
+ calll .LmulPv288x32
+ movl 420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl 388(%esp), %edi
+ movl 448(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 456(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl %ebp, %edx
+ movl %esi, %ebx
+ calll .LmulPv288x32
+ addl 344(%esp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 364(%esp), %ebx
+ movl 360(%esp), %edi
+ movl 356(%esp), %esi
+ movl 348(%esp), %ecx
+ movl 352(%esp), %edx
+ movl 448(%esp), %eax
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 304(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 324(%esp), %edi
+ movl 320(%esp), %ebp
+ movl 316(%esp), %esi
+ movl 308(%esp), %ecx
+ movl 312(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 264(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 288(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 284(%esp), %ebx
+ movl 280(%esp), %edi
+ movl 276(%esp), %esi
+ movl 268(%esp), %ecx
+ movl 272(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 224(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 248(%esp), %ebx
+ movl 244(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 240(%esp), %edi
+ movl 236(%esp), %ebp
+ movl 228(%esp), %ecx
+ movl 232(%esp), %edx
+ movl 448(%esp), %eax
+ movl 44(%esp), %esi # 4-byte Reload
+ movl %esi, 16(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 44(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 204(%esp), %edi
+ movl 200(%esp), %ebx
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 448(%esp), %eax
+ movl 44(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 20(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 144(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 164(%esp), %ebx
+ movl 160(%esp), %edi
+ movl 156(%esp), %esi
+ movl 148(%esp), %ecx
+ movl 152(%esp), %edx
+ movl 448(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 12(%esp), %esi # 4-byte Reload
+ addl 104(%esp), %esi
+ movl 140(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 136(%esp), %ebp
+ movl 132(%esp), %edi
+ movl 128(%esp), %ebx
+ movl 124(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 116(%esp), %edx
+ movl 108(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl 448(%esp), %eax
+ movl %esi, 28(%eax)
+ movl 12(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl %esi, %ebp
+ addl 64(%esp), %ebp
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 88(%esp), %edi
+ movl 84(%esp), %ebx
+ movl 80(%esp), %esi
+ movl 76(%esp), %eax
+ movl 448(%esp), %ecx
+ movl %ebp, 32(%ecx)
+ movl %edx, 36(%ecx)
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl 52(%esp), %edx # 4-byte Reload
+ movl %edx, 40(%ecx)
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 56(%ecx)
+ movl %eax, 60(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 68(%ecx)
+ addl $428, %esp # imm = 0x1AC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end128:
+ .size mcl_fpDbl_mulPre9Lbmi2, .Lfunc_end128-mcl_fpDbl_mulPre9Lbmi2
+
+ .globl mcl_fpDbl_sqrPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre9Lbmi2,@function
+mcl_fpDbl_sqrPre9Lbmi2: # @mcl_fpDbl_sqrPre9Lbmi2
+# BB#0:
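+# Squares the 9-limb (288-bit) operand: each of its nine 32-bit words is fed to
+# .LmulPv288x32 in turn and the partial products are accumulated with adcl into
+# the full 18-limb (576-bit) result.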
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $428, %esp # imm = 0x1AC
+ calll .L129$pb
+.L129$pb:
+ popl %ebx
+.Ltmp10:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp10-.L129$pb), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 452(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl %edx, %esi
+ movl %ebx, %edi
+ calll .LmulPv288x32
+ movl 420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl 388(%esp), %ebp
+ movl 448(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 4(%esi), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl %esi, %edx
+ movl %edi, %ebx
+ calll .LmulPv288x32
+ addl 344(%esp), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 364(%esp), %ebx
+ movl 360(%esp), %edi
+ movl 356(%esp), %esi
+ movl 348(%esp), %ecx
+ movl 352(%esp), %edx
+ movl 448(%esp), %eax
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 304(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 324(%esp), %edi
+ movl 320(%esp), %ebp
+ movl 316(%esp), %esi
+ movl 308(%esp), %ecx
+ movl 312(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 264(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 288(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 284(%esp), %ebx
+ movl 280(%esp), %edi
+ movl 276(%esp), %esi
+ movl 268(%esp), %ecx
+ movl 272(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 224(%esp), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 248(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 244(%esp), %edi
+ movl 240(%esp), %ebp
+ movl 236(%esp), %esi
+ movl 228(%esp), %ecx
+ movl 232(%esp), %edx
+ movl 448(%esp), %eax
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 204(%esp), %ebx
+ movl 200(%esp), %edi
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 144(%esp), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 164(%esp), %edi
+ movl 160(%esp), %ebp
+ movl 156(%esp), %esi
+ movl 148(%esp), %ecx
+ movl 152(%esp), %edx
+ movl 448(%esp), %eax
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 4(%esp), %esi # 4-byte Reload
+ addl 104(%esp), %esi
+ movl 140(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 136(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp
+ movl 128(%esp), %ebx
+ movl 124(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 120(%esp), %edi
+ movl 116(%esp), %edx
+ movl 108(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl 448(%esp), %eax
+ movl %esi, 28(%eax)
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl %esi, %ebp
+ addl 64(%esp), %ebp
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 88(%esp), %edi
+ movl 84(%esp), %ebx
+ movl 80(%esp), %esi
+ movl 76(%esp), %eax
+ movl 448(%esp), %ecx
+ movl %ebp, 32(%ecx)
+ movl %edx, 36(%ecx)
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ movl %edx, 40(%ecx)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 56(%ecx)
+ movl %eax, 60(%ecx)
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 68(%ecx)
+ addl $428, %esp # imm = 0x1AC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end129:
+ .size mcl_fpDbl_sqrPre9Lbmi2, .Lfunc_end129-mcl_fpDbl_sqrPre9Lbmi2
+
+ .globl mcl_fp_mont9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont9Lbmi2,@function
+mcl_fp_mont9Lbmi2: # @mcl_fp_mont9Lbmi2
+# BB#0:
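+# 9-limb (288-bit) Montgomery multiplication, word-serial form: each multiplier
+# word is folded in with .LmulPv288x32, the running sum is reduced by adding
+# m * modulus where m = t0 * n0 (n0 is loaded from the word just below the
+# modulus array), and a final carry-driven conditional subtraction of the
+# modulus selects the reduced result.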
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $796, %esp # imm = 0x31C
+ calll .L130$pb
+.L130$pb:
+ popl %ebx
+.Ltmp11:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp11-.L130$pb), %ebx
+ movl 828(%esp), %eax
+ movl -4(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 752(%esp), %ebp
+ movl 756(%esp), %esi
+ movl %ebp, %eax
+ imull %edi, %eax
+ movl 788(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 780(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 776(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 772(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 768(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 764(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 760(%esp), %edi
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 712(%esp), %ebp
+ adcl 716(%esp), %esi
+ adcl 720(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 748(%esp), %ebp
+ sbbl %eax, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 64(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 672(%esp), %esi
+ adcl 676(%esp), %edi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 704(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 708(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ andl $1, %ebp
+ addl 632(%esp), %esi
+ adcl 636(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 660(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 824(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ addl 592(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 616(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 624(%esp), %esi
+ adcl 628(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ andl $1, %ebp
+ addl 552(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 580(%esp), %edi
+ adcl 584(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 824(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 512(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 524(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 536(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 544(%esp), %edi
+ adcl 548(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 472(%esp), %ebp
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 484(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 496(%esp), %ebp
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 500(%esp), %esi
+ adcl 504(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 508(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 820(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 432(%esp), %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 444(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 452(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 456(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl %esi, %eax
+ andl $1, %eax
+ addl 392(%esp), %ebp
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 396(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 400(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 404(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 408(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 412(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 416(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 424(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 428(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ addl 352(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 364(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 372(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 384(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 312(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 316(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 328(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 332(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 336(%esp), %esi
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 340(%esp), %edi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 344(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 348(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %ebp
+ movl 824(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 272(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 292(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 296(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 308(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 232(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 240(%esp), %esi
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 244(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 260(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 192(%esp), %ecx
+ adcl 196(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 200(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 212(%esp), %esi
+ adcl 216(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ andl $1, %ebp
+ addl 152(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 164(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 172(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 180(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 112(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 120(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ adcl 136(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ andl $1, %edi
+ addl 72(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 92(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl 828(%esp), %ebx
+ subl (%ebx), %eax
+ movl %ecx, %edx
+ sbbl 4(%ebx), %edx
+ movl %esi, %ecx
+ sbbl 8(%ebx), %ecx
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 12(%ebx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 20(%ebx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 24(%ebx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ sbbl 28(%ebx), %esi
+ movl 60(%esp), %ebp # 4-byte Reload
+ sbbl 32(%ebx), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ sbbl $0, %edi
+ andl $1, %edi
+ movl %edi, %ebx
+ jne .LBB130_2
+# BB#1:
+ movl %esi, 32(%esp) # 4-byte Spill
+.LBB130_2:
+ testb %bl, %bl
+ movl 68(%esp), %esi # 4-byte Reload
+ jne .LBB130_4
+# BB#3:
+ movl %eax, %esi
+.LBB130_4:
+ movl 816(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 64(%esp), %eax # 4-byte Reload
+ jne .LBB130_6
+# BB#5:
+ movl %edx, %eax
+.LBB130_6:
+ movl %eax, 4(%ebp)
+ movl 52(%esp), %eax # 4-byte Reload
+ jne .LBB130_8
+# BB#7:
+ movl %ecx, %eax
+.LBB130_8:
+ movl %eax, 8(%ebp)
+ movl 44(%esp), %eax # 4-byte Reload
+ jne .LBB130_10
+# BB#9:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB130_10:
+ movl %eax, 12(%ebp)
+ jne .LBB130_12
+# BB#11:
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+.LBB130_12:
+ movl 40(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebp)
+ movl 36(%esp), %eax # 4-byte Reload
+ jne .LBB130_14
+# BB#13:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB130_14:
+ movl %eax, 20(%ebp)
+ movl 48(%esp), %eax # 4-byte Reload
+ jne .LBB130_16
+# BB#15:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB130_16:
+ movl %eax, 24(%ebp)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebp)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB130_18
+# BB#17:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB130_18:
+ movl %eax, 32(%ebp)
+ addl $796, %esp # imm = 0x31C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end130:
+ .size mcl_fp_mont9Lbmi2, .Lfunc_end130-mcl_fp_mont9Lbmi2
+
+ .globl mcl_fp_montNF9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF9Lbmi2,@function
+mcl_fp_montNF9Lbmi2: # @mcl_fp_montNF9Lbmi2
+# BB#0:
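+# Montgomery multiplication, "NF" variant: same word-serial structure as
+# mcl_fp_mont9Lbmi2, but the final correction picks between the running sum and
+# its trial subtraction using the sign of the borrow (sarl $31) rather than a
+# carry bit.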
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $796, %esp # imm = 0x31C
+ calll .L131$pb
+.L131$pb:
+ popl %ebx
+.Ltmp12:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp12-.L131$pb), %ebx
+ movl 828(%esp), %eax
+ movl -4(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 752(%esp), %esi
+ movl 756(%esp), %ebp
+ movl %esi, %eax
+ imull %edi, %eax
+ movl 788(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 780(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 776(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 772(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 768(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 764(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 760(%esp), %edi
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 712(%esp), %esi
+ adcl 716(%esp), %ebp
+ adcl 720(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 740(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 708(%esp), %eax
+ addl 672(%esp), %ebp
+ adcl 676(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 696(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 704(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 632(%esp), %ebp
+ adcl 636(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 656(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 664(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 628(%esp), %eax
+ addl 592(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 596(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 600(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 604(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 608(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 612(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 616(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 620(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 624(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 552(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 572(%esp), %esi
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 576(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 588(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 548(%esp), %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ addl 512(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 516(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 520(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 524(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 528(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 532(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 536(%esp), %ebp
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 540(%esp), %edi
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 544(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edx, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 472(%esp), %eax
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 496(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 500(%esp), %edi
+ movl %edi, %ebp
+ adcl 504(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 820(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ movl 468(%esp), %eax
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 432(%esp), %ecx
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 436(%esp), %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 440(%esp), %edi
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 444(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 448(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 452(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 456(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 460(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 464(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 392(%esp), %ebp
+ adcl 396(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 400(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 412(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 424(%esp), %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 428(%esp), %esi
+ movl 824(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 388(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 352(%esp), %ecx
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 356(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 360(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 364(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 368(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 372(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 376(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 380(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 312(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 324(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 340(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 348(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 308(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 272(%esp), %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 280(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 292(%esp), %ebp
+ adcl 296(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 232(%esp), %edi
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 236(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ adcl 252(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 228(%esp), %ebp
+ movl %esi, %ecx
+ addl 192(%esp), %ecx
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 196(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 208(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 152(%esp), %edi
+ adcl 156(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 160(%esp), %edi
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 164(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 188(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 148(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 112(%esp), %ecx
+ adcl 116(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 120(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 132(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 72(%esp), %edi
+ movl 44(%esp), %edi # 4-byte Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 80(%esp), %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 84(%esp), %edi
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 92(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 828(%esp), %eax
+ subl (%eax), %edx
+ sbbl 4(%eax), %ebx
+ movl %edi, %ecx
+ sbbl 8(%eax), %ecx
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 12(%eax), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ sbbl 16(%eax), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 20(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 24(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 28(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ sbbl 32(%eax), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ js .LBB131_2
+# BB#1:
+ movl %edx, %eax
+.LBB131_2:
+ movl 816(%esp), %edx
+ movl %eax, (%edx)
+ movl 64(%esp), %esi # 4-byte Reload
+ js .LBB131_4
+# BB#3:
+ movl %ebx, %esi
+.LBB131_4:
+ movl %esi, 4(%edx)
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB131_6
+# BB#5:
+ movl %ecx, %edi
+.LBB131_6:
+ movl %edi, 8(%edx)
+ js .LBB131_8
+# BB#7:
+ movl 16(%esp), %ebp # 4-byte Reload
+.LBB131_8:
+ movl %ebp, 12(%edx)
+ js .LBB131_10
+# BB#9:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB131_10:
+ movl %eax, 16(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB131_12
+# BB#11:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB131_12:
+ movl %eax, 20(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB131_14
+# BB#13:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB131_14:
+ movl %eax, 24(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB131_16
+# BB#15:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB131_16:
+ movl %eax, 28(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB131_18
+# BB#17:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB131_18:
+ movl %eax, 32(%edx)
+ addl $796, %esp # imm = 0x31C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end131:
+ .size mcl_fp_montNF9Lbmi2, .Lfunc_end131-mcl_fp_montNF9Lbmi2
+
+ .globl mcl_fp_montRed9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed9Lbmi2,@function
+mcl_fp_montRed9Lbmi2: # @mcl_fp_montRed9Lbmi2
+# BB#0:
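+# Montgomery reduction of an 18-limb (576-bit) input: each round forms
+# m = t0 * n0 (n0 loaded from the word just below the modulus array) and adds
+# m * modulus via .LmulPv288x32, retiring one low word per round.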
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $492, %esp # imm = 0x1EC
+ calll .L132$pb
+.L132$pb:
+ popl %ebx
+.Ltmp13:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp13-.L132$pb), %ebx
+ movl 520(%esp), %edx
+ movl -4(%edx), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 516(%esp), %eax
+ movl (%eax), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 4(%eax), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ imull %edi, %ecx
+ movl 68(%eax), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 64(%eax), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 60(%eax), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 56(%eax), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 52(%eax), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 48(%eax), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 44(%eax), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 40(%eax), %edi
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 36(%eax), %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ movl 32(%eax), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 24(%eax), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 20(%eax), %ebp
+ movl 16(%eax), %edi
+ movl 12(%eax), %esi
+ movl 8(%eax), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl (%edx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 16(%edx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%edx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 8(%edx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 4(%edx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, (%esp)
+ leal 448(%esp), %ecx
+ calll .LmulPv288x32
+ movl 76(%esp), %eax # 4-byte Reload
+ addl 448(%esp), %eax
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 452(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 460(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 464(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 468(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ sbbl %eax, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ movl 76(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 408(%esp), %ecx
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 412(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 416(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 424(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 428(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 432(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 436(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 440(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl $0, %esi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 368(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 404(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 328(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 364(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 288(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ movl 64(%esp), %eax # 4-byte Reload
+ addl 288(%esp), %eax
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 292(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 520(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 248(%esp), %esi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 264(%esp), %ebp
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %edi, %esi
+ adcl $0, %esi
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 208(%esp), %edi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 212(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 220(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 520(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 168(%esp), %ebp
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 172(%esp), %ecx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 180(%esp), %ebp
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 184(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 128(%esp), %edi
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl 136(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ adcl 140(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 144(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 164(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ subl 20(%esp), %edi # 4-byte Folded Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 16(%esp), %eax # 4-byte Folded Reload
+ sbbl 24(%esp), %esi # 4-byte Folded Reload
+ sbbl 28(%esp), %ecx # 4-byte Folded Reload
+ sbbl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %ebx # 4-byte Reload
+ sbbl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx # 4-byte Reload
+ sbbl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 92(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ movl %ebp, %edx
+ sbbl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 104(%esp) # 4-byte Spill
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB132_2
+# BB#1:
+ movl %ecx, 116(%esp) # 4-byte Spill
+.LBB132_2:
+ testb %dl, %dl
+ movl 120(%esp), %ecx # 4-byte Reload
+ jne .LBB132_4
+# BB#3:
+ movl %edi, %ecx
+.LBB132_4:
+ movl 512(%esp), %edi
+ movl %ecx, (%edi)
+ movl 88(%esp), %ecx # 4-byte Reload
+ jne .LBB132_6
+# BB#5:
+ movl %eax, 124(%esp) # 4-byte Spill
+.LBB132_6:
+ movl 124(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%edi)
+ movl 96(%esp), %eax # 4-byte Reload
+ jne .LBB132_8
+# BB#7:
+ movl %esi, %eax
+.LBB132_8:
+ movl %eax, 8(%edi)
+ movl 116(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl 108(%esp), %ebp # 4-byte Reload
+ jne .LBB132_10
+# BB#9:
+ movl 72(%esp), %ebp # 4-byte Reload
+.LBB132_10:
+ movl %ebp, 16(%edi)
+ movl 112(%esp), %ebx # 4-byte Reload
+ jne .LBB132_12
+# BB#11:
+ movl 76(%esp), %ebx # 4-byte Reload
+.LBB132_12:
+ movl %ebx, 20(%edi)
+ movl 100(%esp), %esi # 4-byte Reload
+ jne .LBB132_14
+# BB#13:
+ movl 84(%esp), %esi # 4-byte Reload
+.LBB132_14:
+ movl %esi, 24(%edi)
+ jne .LBB132_16
+# BB#15:
+ movl 92(%esp), %ecx # 4-byte Reload
+.LBB132_16:
+ movl %ecx, 28(%edi)
+ jne .LBB132_18
+# BB#17:
+ movl 104(%esp), %eax # 4-byte Reload
+.LBB132_18:
+ movl %eax, 32(%edi)
+ addl $492, %esp # imm = 0x1EC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end132:
+ .size mcl_fp_montRed9Lbmi2, .Lfunc_end132-mcl_fp_montRed9Lbmi2
+
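+# mcl_fp_addPre9Lbmi2: 9-limb (288-bit) addition without modular reduction.
+# Writes the limb-wise sum to the destination (first argument) and returns the carry-out in %eax.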
+ .globl mcl_fp_addPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre9Lbmi2,@function
+mcl_fp_addPre9Lbmi2: # @mcl_fp_addPre9Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl %esi, 24(%ebx)
+ movl %edx, 28(%ebx)
+ movl 32(%eax), %eax
+ movl 32(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 32(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end133:
+ .size mcl_fp_addPre9Lbmi2, .Lfunc_end133-mcl_fp_addPre9Lbmi2
+
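+# mcl_fp_subPre9Lbmi2: 9-limb subtraction without reduction; stores the difference and returns the borrow in %eax.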
+ .globl mcl_fp_subPre9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre9Lbmi2,@function
+mcl_fp_subPre9Lbmi2: # @mcl_fp_subPre9Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edi, 24(%ebp)
+ movl %esi, 28(%ebp)
+ movl 32(%edx), %edx
+ movl 32(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 32(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end134:
+ .size mcl_fp_subPre9Lbmi2, .Lfunc_end134-mcl_fp_subPre9Lbmi2
+
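+# mcl_fp_shr1_9Lbmi2: logical right shift of a 9-limb value by one bit, implemented as a shrdl chain with a final shrl on the top limb.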
+ .globl mcl_fp_shr1_9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_9Lbmi2,@function
+mcl_fp_shr1_9Lbmi2: # @mcl_fp_shr1_9Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 24(%esi)
+ movl 32(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 28(%esi)
+ shrl %eax
+ movl %eax, 32(%esi)
+ popl %esi
+ retl
+.Lfunc_end135:
+ .size mcl_fp_shr1_9Lbmi2, .Lfunc_end135-mcl_fp_shr1_9Lbmi2
+
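+# mcl_fp_add9Lbmi2: modular addition; adds the 9-limb operands, then subtracts the modulus and keeps the reduced value only on the %nocarry path.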
+ .globl mcl_fp_add9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add9Lbmi2,@function
+mcl_fp_add9Lbmi2: # @mcl_fp_add9Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 44(%esp), %ebx
+ addl (%ebx), %ecx
+ movl %ecx, %ebp
+ adcl 4(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %eax
+ adcl 12(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 16(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 20(%ebx), %esi
+ adcl 20(%edi), %esi
+ movl 24(%ebx), %edx
+ adcl 24(%edi), %edx
+ movl 28(%ebx), %ecx
+ adcl 28(%edi), %ecx
+ movl 32(%ebx), %eax
+ adcl 32(%edi), %eax
+ movl 40(%esp), %edi
+ movl %ebp, (%edi)
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%edi)
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%edi)
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%edi)
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%edi)
+ movl %esi, 20(%edi)
+ movl %edx, 24(%edi)
+ movl %ecx, 28(%edi)
+ movl %eax, 32(%edi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 52(%esp), %edi
+ subl (%edi), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl 16(%esp), %ebp # 4-byte Reload
+ sbbl 4(%edi), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ sbbl 8(%edi), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebp # 4-byte Reload
+ sbbl 12(%edi), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 4(%esp), %ebp # 4-byte Reload
+ sbbl 16(%edi), %ebp
+ sbbl 20(%edi), %esi
+ sbbl 24(%edi), %edx
+ sbbl 28(%edi), %ecx
+ sbbl 32(%edi), %eax
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB136_2
+# BB#1: # %nocarry
+ movl (%esp), %edi # 4-byte Reload
+ movl 40(%esp), %ebx
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl %ebp, 16(%ebx)
+ movl %esi, 20(%ebx)
+ movl %edx, 24(%ebx)
+ movl %ecx, 28(%ebx)
+ movl %eax, 32(%ebx)
+.LBB136_2: # %carry
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end136:
+ .size mcl_fp_add9Lbmi2, .Lfunc_end136-mcl_fp_add9Lbmi2
+
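+# mcl_fp_addNF9Lbmi2: addNF variant of modular addition; adds, subtracts the modulus, and selects the reduced sum unless the subtraction went negative (sign test via sarl $31).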
+ .globl mcl_fp_addNF9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF9Lbmi2,@function
+mcl_fp_addNF9Lbmi2: # @mcl_fp_addNF9Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 100(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edi
+ movl 96(%esp), %esi
+ addl (%esi), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 4(%esi), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 28(%eax), %ebp
+ movl 24(%eax), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 20(%eax), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 16(%eax), %ebx
+ movl 12(%eax), %edx
+ movl 8(%eax), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 12(%esi), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 16(%esi), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 20(%esi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 24(%esi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 28(%esi), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 32(%esi), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 104(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, %ebp
+ subl (%esi), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ sbbl 4(%esi), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ sbbl 8(%esi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ sbbl 16(%esi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ sbbl 20(%esi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ sbbl 24(%esi), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 28(%esi), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edx
+ movl %ecx, %ebp
+ sbbl 32(%esi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sarl $31, %esi
+ testl %esi, %esi
+ js .LBB137_2
+# BB#1:
+ movl (%esp), %eax # 4-byte Reload
+.LBB137_2:
+ movl 92(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB137_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB137_4:
+ movl %eax, 4(%ecx)
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB137_6
+# BB#5:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB137_6:
+ movl %eax, 8(%ecx)
+ movl %ebp, %eax
+ js .LBB137_8
+# BB#7:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB137_8:
+ movl %edx, 12(%ecx)
+ movl 56(%esp), %edx # 4-byte Reload
+ js .LBB137_10
+# BB#9:
+ movl 16(%esp), %ebx # 4-byte Reload
+.LBB137_10:
+ movl %ebx, 16(%ecx)
+ js .LBB137_12
+# BB#11:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB137_12:
+ movl %edi, 20(%ecx)
+ js .LBB137_14
+# BB#13:
+ movl 24(%esp), %esi # 4-byte Reload
+.LBB137_14:
+ movl %esi, 24(%ecx)
+ js .LBB137_16
+# BB#15:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB137_16:
+ movl %edx, 28(%ecx)
+ js .LBB137_18
+# BB#17:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB137_18:
+ movl %eax, 32(%ecx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end137:
+ .size mcl_fp_addNF9Lbmi2, .Lfunc_end137-mcl_fp_addNF9Lbmi2
+
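+# mcl_fp_sub9Lbmi2: modular subtraction; subtracts the 9-limb operands and, on the %carry path, adds the modulus back before storing.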
+ .globl mcl_fp_sub9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub9Lbmi2,@function
+mcl_fp_sub9Lbmi2: # @mcl_fp_sub9Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 52(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 56(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 16(%esi), %edx
+ sbbl 16(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 20(%esi), %ecx
+ sbbl 20(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 28(%esi), %ebp
+ sbbl 28(%edi), %ebp
+ movl 32(%esi), %esi
+ sbbl 32(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 48(%esp), %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl %edx, 16(%ebx)
+ movl %ecx, 20(%ebx)
+ movl %eax, 24(%ebx)
+ movl %ebp, 28(%ebx)
+ movl %esi, 32(%ebx)
+ je .LBB138_2
+# BB#1: # %carry
+ movl %esi, %edi
+ movl 60(%esp), %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl %ecx, 24(%ebx)
+ movl 28(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 28(%ebx)
+ movl 32(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 32(%ebx)
+.LBB138_2: # %nocarry
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end138:
+ .size mcl_fp_sub9Lbmi2, .Lfunc_end138-mcl_fp_sub9Lbmi2
+
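+# mcl_fp_subNF9Lbmi2: branch-free subNF variant; subtracts, then adds back the modulus masked by the sign of the result.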
+ .globl mcl_fp_subNF9Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF9Lbmi2,@function
+mcl_fp_subNF9Lbmi2: # @mcl_fp_subNF9Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $48, %esp
+ movl 72(%esp), %edx
+ movl (%edx), %ecx
+ movl 4(%edx), %eax
+ movl 76(%esp), %esi
+ subl (%esi), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ sbbl 4(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 28(%edx), %ebp
+ movl 24(%edx), %edi
+ movl 20(%edx), %ebx
+ movl 16(%edx), %ecx
+ movl 12(%edx), %eax
+ movl 8(%edx), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ sbbl 12(%esi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 16(%esi), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ sbbl 20(%esi), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ sbbl 24(%esi), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ sbbl 28(%esi), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 32(%esi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ sarl $31, %ecx
+ movl %ecx, %edi
+ shldl $1, %eax, %edi
+ movl 80(%esp), %ebp
+ movl 12(%ebp), %eax
+ andl %edi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 4(%ebp), %ebx
+ andl %edi, %ebx
+ andl (%ebp), %edi
+ movl 32(%ebp), %eax
+ andl %ecx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ rorxl $31, %ecx, %eax
+ andl 28(%ebp), %ecx
+ movl 24(%ebp), %edx
+ andl %eax, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 20(%ebp), %esi
+ andl %eax, %esi
+ movl 16(%ebp), %edx
+ andl %eax, %edx
+ andl 8(%ebp), %eax
+ addl 36(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl 68(%esp), %ebp
+ movl %edi, (%ebp)
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 4(%ebp)
+ movl 4(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %eax, 8(%ebp)
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 12(%ebp)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 16(%ebp)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 20(%ebp)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 24(%ebp)
+ movl %ecx, 28(%ebp)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ebp)
+ addl $48, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end139:
+ .size mcl_fp_subNF9Lbmi2, .Lfunc_end139-mcl_fp_subNF9Lbmi2
+
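+# mcl_fpDbl_add9Lbmi2: addition of two 18-limb (double-width) values; the low half is stored as-is and the high half is conditionally reduced by the modulus.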
+ .globl mcl_fpDbl_add9Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add9Lbmi2,@function
+mcl_fpDbl_add9Lbmi2: # @mcl_fpDbl_add9Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $68, %esp
+ movl 96(%esp), %edx
+ movl 92(%esp), %edi
+ movl 12(%edi), %esi
+ movl 16(%edi), %ecx
+ movl 8(%edx), %ebx
+ movl (%edx), %ebp
+ addl (%edi), %ebp
+ movl 88(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%edx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%edx), %esi
+ adcl 16(%edx), %ecx
+ movl %ebp, 4(%eax)
+ movl 44(%edx), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl %ebx, 8(%eax)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %ecx, 16(%eax)
+ movl 24(%edi), %ecx
+ adcl %ebx, %ecx
+ movl 28(%edx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %ecx, 24(%eax)
+ movl 32(%edi), %ecx
+ adcl %ebx, %ecx
+ movl 36(%edx), %ebp
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebp, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 40(%edx), %esi
+ movl %ecx, 32(%eax)
+ movl 40(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%edi), %eax
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl 48(%edi), %ebx
+ adcl %ecx, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ movl 52(%edi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 56(%edx), %esi
+ movl 56(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%edx), %ebp
+ movl 60(%edi), %esi
+ adcl %ebp, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 64(%edx), %eax
+ movl 64(%edi), %ebp
+ adcl %eax, %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 68(%edx), %edx
+ movl 68(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 100(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ subl (%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 4(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ sbbl 8(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 12(%edi), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 20(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ sbbl 24(%edi), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl 32(%esp), %ebp # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ sbbl 32(%edi), %ebx
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB140_2
+# BB#1:
+ movl %ebx, %ebp
+.LBB140_2:
+ testb %dl, %dl
+ movl 60(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl 36(%esp), %esi # 4-byte Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ jne .LBB140_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB140_4:
+ movl 88(%esp), %eax
+ movl %edx, 36(%eax)
+ movl %ebx, 40(%eax)
+ movl %edi, 44(%eax)
+ movl %esi, 48(%eax)
+ movl %ecx, 52(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB140_6
+# BB#5:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB140_6:
+ movl %ecx, 56(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB140_8
+# BB#7:
+ movl 24(%esp), %edx # 4-byte Reload
+.LBB140_8:
+ movl %edx, 60(%eax)
+ jne .LBB140_10
+# BB#9:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB140_10:
+ movl %ecx, 64(%eax)
+ movl %ebp, 68(%eax)
+ addl $68, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end140:
+ .size mcl_fpDbl_add9Lbmi2, .Lfunc_end140-mcl_fpDbl_add9Lbmi2
+
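+# mcl_fpDbl_sub9Lbmi2: subtraction of two 18-limb values; the modulus is added back to the high half when the subtraction borrows.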
+ .globl mcl_fpDbl_sub9Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub9Lbmi2,@function
+mcl_fpDbl_sub9Lbmi2: # @mcl_fpDbl_sub9Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl 76(%esp), %ebx
+ movl (%ebx), %eax
+ movl 4(%ebx), %edx
+ movl 80(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %edx
+ movl 8(%ebx), %esi
+ sbbl 8(%ebp), %esi
+ movl 72(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%ebx), %eax
+ sbbl 12(%ebp), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%ebx), %edx
+ sbbl 16(%ebp), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebp), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%ebx), %eax
+ sbbl %esi, %eax
+ movl 24(%ebp), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%ebx), %edx
+ sbbl %esi, %edx
+ movl 28(%ebp), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%ebx), %eax
+ sbbl %esi, %eax
+ movl 32(%ebp), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%ebx), %edx
+ sbbl %esi, %edx
+ movl 36(%ebp), %esi
+ movl %eax, 28(%ecx)
+ movl 36(%ebx), %eax
+ sbbl %esi, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%ebp), %eax
+ movl %edx, 32(%ecx)
+ movl 40(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 44(%ebp), %eax
+ movl 44(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%ebp), %eax
+ movl 48(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 52(%ebp), %eax
+ movl 52(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 56(%ebp), %eax
+ movl 56(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 60(%ebp), %eax
+ movl 60(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 64(%ebp), %eax
+ movl 64(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 68(%ebp), %eax
+ movl 68(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 84(%esp), %ebp
+ jne .LBB141_1
+# BB#2:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB141_3
+.LBB141_1:
+ movl 32(%ebp), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+.LBB141_3:
+ testb %al, %al
+ jne .LBB141_4
+# BB#5:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ movl $0, %esi
+ jmp .LBB141_6
+.LBB141_4:
+ movl (%ebp), %esi
+ movl 4(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB141_6:
+ jne .LBB141_7
+# BB#8:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB141_9
+.LBB141_7:
+ movl 28(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB141_9:
+ jne .LBB141_10
+# BB#11:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB141_12
+.LBB141_10:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB141_12:
+ jne .LBB141_13
+# BB#14:
+ movl $0, %edi
+ jmp .LBB141_15
+.LBB141_13:
+ movl 20(%ebp), %edi
+.LBB141_15:
+ jne .LBB141_16
+# BB#17:
+ movl $0, %ebx
+ jmp .LBB141_18
+.LBB141_16:
+ movl 16(%ebp), %ebx
+.LBB141_18:
+ jne .LBB141_19
+# BB#20:
+ movl %ebp, %eax
+ movl $0, %ebp
+ jmp .LBB141_21
+.LBB141_19:
+ movl %ebp, %eax
+ movl 12(%eax), %ebp
+.LBB141_21:
+ jne .LBB141_22
+# BB#23:
+ xorl %eax, %eax
+ jmp .LBB141_24
+.LBB141_22:
+ movl 8(%eax), %eax
+.LBB141_24:
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 36(%ecx)
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 40(%ecx)
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 48(%ecx)
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ movl (%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 56(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 60(%ecx)
+ movl %eax, 64(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%ecx)
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end141:
+ .size mcl_fpDbl_sub9Lbmi2, .Lfunc_end141-mcl_fpDbl_sub9Lbmi2
+
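+# .LmulPv320x32: local helper that multiplies a 10-limb (320-bit) operand by a 32-bit word using BMI2 mulx, producing an 11-limb result.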
+ .align 16, 0x90
+ .type .LmulPv320x32,@function
+.LmulPv320x32: # @mulPv320x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl %edx, %eax
+ movl 48(%esp), %edx
+ mulxl 4(%eax), %edi, %esi
+ mulxl (%eax), %ebp, %ebx
+ movl %ebp, 24(%esp) # 4-byte Spill
+ addl %edi, %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ mulxl 8(%eax), %edi, %ebx
+ adcl %esi, %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ mulxl 12(%eax), %esi, %edi
+ adcl %ebx, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ mulxl 16(%eax), %esi, %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ mulxl 20(%eax), %ebx, %esi
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ mulxl 24(%eax), %edi, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ movl %ebp, %edx
+ mulxl 28(%eax), %esi, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %ebp, %edx
+ mulxl 32(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%ecx)
+ movl %ebx, 20(%ecx)
+ movl %edi, 24(%ecx)
+ movl %esi, 28(%ecx)
+ movl %edx, 32(%ecx)
+ movl 48(%esp), %edx
+ mulxl 36(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ adcl $0, %edx
+ movl %edx, 40(%ecx)
+ movl %ecx, %eax
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end142:
+ .size .LmulPv320x32, .Lfunc_end142-.LmulPv320x32
+
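+# mcl_fp_mulUnitPre10Lbmi2: multiplies a 10-limb operand by a single 32-bit word via .LmulPv320x32 and copies the 11-limb product out.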
+ .globl mcl_fp_mulUnitPre10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre10Lbmi2,@function
+mcl_fp_mulUnitPre10Lbmi2: # @mcl_fp_mulUnitPre10Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $76, %esp
+ calll .L143$pb
+.L143$pb:
+ popl %ebx
+.Ltmp14:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp14-.L143$pb), %ebx
+ movl 104(%esp), %eax
+ movl %eax, (%esp)
+ leal 32(%esp), %ecx
+ movl 100(%esp), %edx
+ calll .LmulPv320x32
+ movl 72(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 60(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 56(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx
+ movl 48(%esp), %ebp
+ movl 44(%esp), %edi
+ movl 40(%esp), %esi
+ movl 32(%esp), %edx
+ movl 36(%esp), %ecx
+ movl 96(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebp, 16(%eax)
+ movl %ebx, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ addl $76, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end143:
+ .size mcl_fp_mulUnitPre10Lbmi2, .Lfunc_end143-mcl_fp_mulUnitPre10Lbmi2
+
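+# mcl_fpDbl_mulPre10Lbmi2: full 10x10-limb multiplication; splits each operand into 5-limb halves and combines three mcl_fpDbl_mulPre5Lbmi2 products (Karatsuba-style).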
+ .globl mcl_fpDbl_mulPre10Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre10Lbmi2,@function
+mcl_fpDbl_mulPre10Lbmi2: # @mcl_fpDbl_mulPre10Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $188, %esp
+ calll .L144$pb
+.L144$pb:
+ popl %ebx
+.Ltmp15:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp15-.L144$pb), %ebx
+ movl %ebx, -128(%ebp) # 4-byte Spill
+ movl 16(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl 12(%ebp), %esi
+ movl %esi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre5Lbmi2@PLT
+ leal 20(%edi), %eax
+ movl %eax, 8(%esp)
+ leal 20(%esi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 40(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre5Lbmi2@PLT
+ movl 28(%esi), %edi
+ movl (%esi), %ebx
+ movl 4(%esi), %eax
+ addl 20(%esi), %ebx
+ movl %ebx, -148(%ebp) # 4-byte Spill
+ adcl 24(%esi), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ adcl 8(%esi), %edi
+ movl %edi, -140(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ addl 20(%esi), %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ adcl 24(%esi), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ movl 28(%esi), %eax
+ adcl 8(%esi), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl 32(%esi), %eax
+ adcl 12(%esi), %eax
+ movl 36(%esi), %ecx
+ adcl 16(%esi), %ecx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %esi
+ popl %eax
+ movl %esi, -156(%ebp) # 4-byte Spill
+ movl %ebx, -124(%ebp) # 4-byte Spill
+ jb .LBB144_2
+# BB#1:
+ xorl %edi, %edi
+ movl $0, -124(%ebp) # 4-byte Folded Spill
+.LBB144_2:
+ movl %edi, -136(%ebp) # 4-byte Spill
+ movl 12(%ebp), %esi
+ movl %esi, %ebx
+ movl 36(%ebx), %esi
+ movl 32(%ebx), %edi
+ movl -96(%ebp), %edx # 4-byte Reload
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 12(%ebx), %edi
+ movl %edi, -116(%ebp) # 4-byte Spill
+ adcl 16(%ebx), %esi
+ movl %esi, -144(%ebp) # 4-byte Spill
+ movl %ecx, -112(%ebp) # 4-byte Spill
+ movl %eax, -104(%ebp) # 4-byte Spill
+ movl -160(%ebp), %edx # 4-byte Reload
+ movl %edx, -108(%ebp) # 4-byte Spill
+ movl -120(%ebp), %esi # 4-byte Reload
+ movl %esi, -96(%ebp) # 4-byte Spill
+ movl -152(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -100(%ebp) # 4-byte Spill
+ jb .LBB144_4
+# BB#3:
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+ movl $0, -104(%ebp) # 4-byte Folded Spill
+ movl $0, -108(%ebp) # 4-byte Folded Spill
+ movl $0, -96(%ebp) # 4-byte Folded Spill
+ movl $0, -100(%ebp) # 4-byte Folded Spill
+.LBB144_4:
+ movl -148(%ebp), %esi # 4-byte Reload
+ movl %esi, -72(%ebp)
+ movl -132(%ebp), %edi # 4-byte Reload
+ movl %edi, -68(%ebp)
+ movl -140(%ebp), %esi # 4-byte Reload
+ movl %esi, -64(%ebp)
+ movl %ebx, -92(%ebp)
+ movl -120(%ebp), %esi # 4-byte Reload
+ movl %esi, -88(%ebp)
+ movl %edx, -84(%ebp)
+ movl %eax, -80(%ebp)
+ movl %ecx, -76(%ebp)
+ sbbl %edx, %edx
+ movl -116(%ebp), %eax # 4-byte Reload
+ movl %eax, -60(%ebp)
+ movl -144(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -56(%ebp)
+ movl -156(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB144_6
+# BB#5:
+ movl $0, %ebx
+ movl $0, %eax
+ movl $0, %edi
+.LBB144_6:
+ movl %eax, -116(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+ leal -92(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -72(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -52(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl -124(%ebp), %eax # 4-byte Reload
+ addl %eax, -100(%ebp) # 4-byte Folded Spill
+ adcl %edi, -96(%ebp) # 4-byte Folded Spill
+ movl -108(%ebp), %esi # 4-byte Reload
+ adcl -136(%ebp), %esi # 4-byte Folded Reload
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl %eax, -104(%ebp) # 4-byte Folded Spill
+ movl -112(%ebp), %edi # 4-byte Reload
+ adcl %ebx, %edi
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ andl $1, %edx
+ movl %edx, -116(%ebp) # 4-byte Spill
+ movl -128(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre5Lbmi2@PLT
+ movl -100(%ebp), %eax # 4-byte Reload
+ addl -32(%ebp), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ adcl -24(%ebp), %esi
+ movl %esi, -108(%ebp) # 4-byte Spill
+ movl -104(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ adcl -16(%ebp), %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl %eax, -116(%ebp) # 4-byte Folded Spill
+ movl -52(%ebp), %ecx
+ movl 8(%ebp), %esi
+ subl (%esi), %ecx
+ movl -48(%ebp), %ebx
+ sbbl 4(%esi), %ebx
+ movl -44(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ movl -40(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl -36(%ebp), %edi
+ sbbl 16(%esi), %edi
+ movl 20(%esi), %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+ sbbl %eax, -100(%ebp) # 4-byte Folded Spill
+ movl 24(%esi), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ sbbl %eax, -96(%ebp) # 4-byte Folded Spill
+ movl 28(%esi), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ sbbl %eax, -108(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ sbbl %eax, -104(%ebp) # 4-byte Folded Spill
+ movl 36(%esi), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ sbbl %eax, -112(%ebp) # 4-byte Folded Spill
+ sbbl $0, -116(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ subl %eax, %ecx
+ movl 44(%esi), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 48(%esi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ sbbl %eax, -120(%ebp) # 4-byte Folded Spill
+ movl 52(%esi), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ sbbl %eax, %edx
+ movl 56(%esi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl 60(%esi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ sbbl %eax, -100(%ebp) # 4-byte Folded Spill
+ movl 64(%esi), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ sbbl %eax, -96(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ sbbl %eax, -108(%ebp) # 4-byte Folded Spill
+ movl 72(%esi), %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ sbbl %eax, -104(%ebp) # 4-byte Folded Spill
+ movl 76(%esi), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ sbbl %eax, -112(%ebp) # 4-byte Folded Spill
+ sbbl $0, -116(%ebp) # 4-byte Folded Spill
+ addl -124(%ebp), %ecx # 4-byte Folded Reload
+ adcl -128(%ebp), %ebx # 4-byte Folded Reload
+ movl %ecx, 20(%esi)
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl -132(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 24(%esi)
+ adcl -136(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ adcl -140(%ebp), %edi # 4-byte Folded Reload
+ movl %edx, 32(%esi)
+ movl -100(%ebp), %eax # 4-byte Reload
+ adcl -160(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 36(%esi)
+ movl -96(%ebp), %ecx # 4-byte Reload
+ adcl -164(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%esi)
+ movl -108(%ebp), %eax # 4-byte Reload
+ adcl -168(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 44(%esi)
+ movl -104(%ebp), %ecx # 4-byte Reload
+ adcl -172(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl -112(%ebp), %edx # 4-byte Reload
+ adcl -176(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl -180(%ebp), %eax # 4-byte Folded Reload
+ movl %edx, 56(%esi)
+ movl %eax, 60(%esi)
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 64(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 68(%esi)
+ movl -152(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 72(%esi)
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 76(%esi)
+ addl $188, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end144:
+ .size mcl_fpDbl_mulPre10Lbmi2, .Lfunc_end144-mcl_fpDbl_mulPre10Lbmi2
+
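+# mcl_fpDbl_sqrPre10Lbmi2: 10-limb squaring; reuses mcl_fpDbl_mulPre5Lbmi2 on the halves with the same Karatsuba-style combine as mulPre10.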
+ .globl mcl_fpDbl_sqrPre10Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre10Lbmi2,@function
+mcl_fpDbl_sqrPre10Lbmi2: # @mcl_fpDbl_sqrPre10Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $188, %esp
+ calll .L145$pb
+.L145$pb:
+ popl %ebx
+.Ltmp16:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp16-.L145$pb), %ebx
+ movl %ebx, -120(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre5Lbmi2@PLT
+ leal 20(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 40(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre5Lbmi2@PLT
+ movl 36(%edi), %eax
+ movl 32(%edi), %ebx
+ movl 28(%edi), %esi
+ movl (%edi), %ecx
+ movl 4(%edi), %edx
+ addl 20(%edi), %ecx
+ adcl 24(%edi), %edx
+ adcl 8(%edi), %esi
+ adcl 12(%edi), %ebx
+ movl %ebx, -124(%ebp) # 4-byte Spill
+ adcl 16(%edi), %eax
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -128(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -108(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -104(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -100(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -96(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ sbbl %ebx, %ebx
+ movl %ebx, -116(%ebp) # 4-byte Spill
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_1
+# BB#2:
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_3
+.LBB145_1:
+ leal (%ecx,%ecx), %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+.LBB145_3:
+ movl -96(%ebp), %edi # 4-byte Reload
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ movl -124(%ebp), %edi # 4-byte Reload
+ jb .LBB145_4
+# BB#5:
+ movl $0, -96(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_6
+.LBB145_4:
+ movl %edx, %ebx
+ shldl $1, %ecx, %ebx
+ movl %ebx, -96(%ebp) # 4-byte Spill
+.LBB145_6:
+ movl -100(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_7
+# BB#8:
+ movl $0, -100(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_9
+.LBB145_7:
+ movl %esi, %ebx
+ shldl $1, %edx, %ebx
+ movl %ebx, -100(%ebp) # 4-byte Spill
+.LBB145_9:
+ movl -104(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_10
+# BB#11:
+ movl $0, -104(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_12
+.LBB145_10:
+ movl %edi, %ebx
+ shldl $1, %esi, %ebx
+ movl %ebx, -104(%ebp) # 4-byte Spill
+.LBB145_12:
+ movl -108(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_13
+# BB#14:
+ movl $0, -108(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_15
+.LBB145_13:
+ movl %eax, %ebx
+ shldl $1, %edi, %ebx
+ movl %ebx, -108(%ebp) # 4-byte Spill
+.LBB145_15:
+ movl %ecx, -72(%ebp)
+ movl %edx, -68(%ebp)
+ movl %esi, -64(%ebp)
+ movl %edi, -60(%ebp)
+ movl %eax, -56(%ebp)
+ movl %ecx, -92(%ebp)
+ movl %edx, -88(%ebp)
+ movl %esi, -84(%ebp)
+ movl %edi, -80(%ebp)
+ movl %eax, -76(%ebp)
+ movl -128(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_16
+# BB#17:
+ movl $0, -124(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_18
+.LBB145_16:
+ shrl $31, %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+.LBB145_18:
+ leal -52(%ebp), %eax
+ movl %eax, (%esp)
+ leal -72(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -92(%ebp), %eax
+ movl %eax, 8(%esp)
+ movl -116(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -120(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre5Lbmi2@PLT
+ movl -112(%ebp), %edi # 4-byte Reload
+ addl -32(%ebp), %edi
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl -100(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ movl -104(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ movl -108(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -108(%ebp) # 4-byte Spill
+ adcl -124(%ebp), %esi # 4-byte Folded Reload
+ movl -52(%ebp), %edx
+ movl 8(%ebp), %eax
+ subl (%eax), %edx
+ movl -48(%ebp), %ebx
+ sbbl 4(%eax), %ebx
+ movl -44(%ebp), %ecx
+ sbbl 8(%eax), %ecx
+ movl %ecx, -116(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ecx
+ sbbl 12(%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -36(%ebp), %ecx
+ sbbl 16(%eax), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ movl 20(%eax), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 28(%eax), %ecx
+ movl %ecx, -132(%ebp) # 4-byte Spill
+ sbbl %ecx, -100(%ebp) # 4-byte Folded Spill
+ movl 32(%eax), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ sbbl %ecx, -104(%ebp) # 4-byte Folded Spill
+ movl 36(%eax), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ sbbl %ecx, -108(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ movl 40(%eax), %ecx
+ movl %ecx, -160(%ebp) # 4-byte Spill
+ subl %ecx, %edx
+ movl 44(%eax), %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 48(%eax), %ecx
+ movl %ecx, -168(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 52(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl -144(%ebp), %edi # 4-byte Reload
+ sbbl %ecx, %edi
+ movl 56(%eax), %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 60(%eax), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 64(%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 68(%eax), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ sbbl %ecx, -100(%ebp) # 4-byte Folded Spill
+ movl 72(%eax), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ sbbl %ecx, -104(%ebp) # 4-byte Folded Spill
+ movl 76(%eax), %ecx
+ movl %ecx, -156(%ebp) # 4-byte Spill
+ sbbl %ecx, -108(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ addl -124(%ebp), %edx # 4-byte Folded Reload
+ adcl -128(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 20(%eax)
+ movl -116(%ebp), %ecx # 4-byte Reload
+ adcl -132(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 24(%eax)
+ adcl -136(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 28(%eax)
+ movl -120(%ebp), %edx # 4-byte Reload
+ adcl -140(%ebp), %edx # 4-byte Folded Reload
+ movl %edi, 32(%eax)
+ movl -112(%ebp), %ecx # 4-byte Reload
+ adcl -160(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 36(%eax)
+ movl -96(%ebp), %edx # 4-byte Reload
+ adcl -164(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 40(%eax)
+ movl -100(%ebp), %ecx # 4-byte Reload
+ adcl -168(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 44(%eax)
+ movl -104(%ebp), %edx # 4-byte Reload
+ adcl -172(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 48(%eax)
+ movl -108(%ebp), %ecx # 4-byte Reload
+ adcl -176(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 52(%eax)
+ adcl -180(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 56(%eax)
+ movl %esi, 60(%eax)
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 64(%eax)
+ movl -148(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 68(%eax)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 72(%eax)
+ movl -156(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 76(%eax)
+ addl $188, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end145:
+ .size mcl_fpDbl_sqrPre10Lbmi2, .Lfunc_end145-mcl_fpDbl_sqrPre10Lbmi2
+
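+# mcl_fp_mont10Lbmi2: 10-limb Montgomery multiplication; interleaves .LmulPv320x32 calls with reduction steps driven by the inverse word loaded from -4 of the modulus pointer.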
+ .globl mcl_fp_mont10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont10Lbmi2,@function
+mcl_fp_mont10Lbmi2: # @mcl_fp_mont10Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1036, %esp # imm = 0x40C
+ calll .L146$pb
+.L146$pb:
+ popl %ebx
+.Ltmp17:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp17-.L146$pb), %ebx
+ movl 1068(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 992(%esp), %edi
+ movl 996(%esp), %ebp
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1032(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1028(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1024(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1020(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1016(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1012(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1008(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1004(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1000(%esp), %esi
+ movl %eax, (%esp)
+ leal 944(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ addl 944(%esp), %edi
+ adcl 948(%esp), %ebp
+ adcl 952(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 1064(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 896(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ addl 896(%esp), %ebp
+ adcl 900(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 936(%esp), %edi
+ sbbl %eax, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 848(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ movl 64(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 848(%esp), %ebp
+ adcl 852(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 856(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 860(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 864(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 868(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 872(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 876(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 880(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 884(%esp), %ebp
+ adcl 888(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 800(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ addl 800(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 832(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 836(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %esi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1068(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 752(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 780(%esp), %esi
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 784(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 704(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 716(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 728(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 732(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 736(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 744(%esp), %edi
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 656(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %esi
+ movl %esi, %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ addl 656(%esp), %eax
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 676(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 688(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 696(%esp), %edi
+ adcl $0, %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 608(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 624(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 636(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 640(%esp), %esi
+ adcl 644(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 648(%esp), %edi
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 560(%esp), %eax
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 572(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 592(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 600(%esp), %edi
+ adcl $0, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 512(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 520(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 548(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 464(%esp), %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 472(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 484(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 496(%esp), %ebp
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 504(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 416(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 432(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 444(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 452(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 368(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 380(%esp), %esi
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 384(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 400(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 320(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 320(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 328(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 332(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 348(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 360(%esp), %ebp
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 272(%esp), %esi
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 276(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 288(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 312(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl $0, %ebp
+ movl 1064(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl %edi, %ecx
+ addl 224(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 236(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 240(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 264(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %ebp
+ addl 176(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 192(%esp), %esi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 196(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 1064(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 128(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 140(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ adcl 144(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 168(%esp), %ebp
+ sbbl %esi, %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 80(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %esi
+ addl 80(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 84(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 92(%esp), %ebx
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 120(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl $0, %esi
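+# Final correction: trial-subtract the modulus; the conditional moves below keep
+# either t or t - p so a fully reduced result is stored.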
+ movl 1068(%esp), %edx
+ subl (%edx), %eax
+ sbbl 4(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %ebx, %ecx
+ sbbl 8(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 12(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ sbbl 16(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ sbbl 20(%edx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ sbbl 24(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edx), %ecx
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 32(%edx), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ sbbl 36(%edx), %ebp
+ movl %ebp, %edx
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB146_2
+# BB#1:
+ movl %ecx, 48(%esp) # 4-byte Spill
+.LBB146_2:
+ movl %esi, %ecx
+ testb %cl, %cl
+ movl 76(%esp), %esi # 4-byte Reload
+ jne .LBB146_4
+# BB#3:
+ movl %eax, %esi
+.LBB146_4:
+ movl 1056(%esp), %eax
+ movl %esi, (%eax)
+ movl 60(%esp), %edi # 4-byte Reload
+ jne .LBB146_6
+# BB#5:
+ movl 16(%esp), %edi # 4-byte Reload
+.LBB146_6:
+ movl %edi, 4(%eax)
+ jne .LBB146_8
+# BB#7:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB146_8:
+ movl %ebx, 8(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB146_10
+# BB#9:
+ movl 24(%esp), %ebp # 4-byte Reload
+.LBB146_10:
+ movl %ebp, 12(%eax)
+ jne .LBB146_12
+# BB#11:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB146_12:
+ movl %ecx, 16(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB146_14
+# BB#13:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB146_14:
+ movl %ecx, 20(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB146_16
+# BB#15:
+ movl 56(%esp), %ecx # 4-byte Reload
+.LBB146_16:
+ movl %ecx, 24(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB146_18
+# BB#17:
+ movl 64(%esp), %ecx # 4-byte Reload
+.LBB146_18:
+ movl %ecx, 32(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB146_20
+# BB#19:
+ movl %edx, %ecx
+.LBB146_20:
+ movl %ecx, 36(%eax)
+ addl $1036, %esp # imm = 0x40C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end146:
+ .size mcl_fp_mont10Lbmi2, .Lfunc_end146-mcl_fp_mont10Lbmi2
+
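+# mcl_fp_montNF10Lbmi2(z, x, y, p): Montgomery multiplication z = x*y*R^-1 mod p for
+# ten 32-bit limbs (R = 2^320), BMI2 build. Unlike the variant above, the final
+# correction is selected from the sign of a trial subtraction rather than a carry flag.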
+ .globl mcl_fp_montNF10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF10Lbmi2,@function
+mcl_fp_montNF10Lbmi2: # @mcl_fp_montNF10Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1020, %esp # imm = 0x3FC
+ calll .L147$pb
+.L147$pb:
+ popl %ebx
+.Ltmp18:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp18-.L147$pb), %ebx
+ movl 1052(%esp), %eax
+ movl -4(%eax), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
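+# %ebp / 20(%esp) now hold n' = -p^-1 mod 2^32, read from the word just below the
+# modulus; it forms the quotient for every reduction step in this function.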
+ movl 1048(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 976(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 976(%esp), %edi
+ movl 980(%esp), %esi
+ movl %edi, %eax
+ imull %ebp, %eax
+ movl 1016(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1012(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1008(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1004(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1000(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 996(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 992(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 988(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 984(%esp), %ebp
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 928(%esp), %edi
+ adcl 932(%esp), %esi
+ adcl 936(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 952(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 920(%esp), %ecx
+ addl 880(%esp), %esi
+ adcl 884(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 900(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 832(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 832(%esp), %esi
+ adcl 836(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 848(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 872(%esp), %esi
+ movl 1048(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 824(%esp), %ecx
+ addl 784(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 796(%esp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 808(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 820(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 736(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 736(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 760(%esp), %edi
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 764(%esp), %ebp
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 768(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 1044(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ movl 728(%esp), %eax
+ movl 28(%esp), %edx # 4-byte Reload
+ addl 688(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 704(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 708(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 712(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 716(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 720(%esp), %ebp
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 724(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 1052(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ addl 640(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 656(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 672(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 676(%esp), %esi
+ movl %esi, %ebp
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 680(%esp), %esi
+ movl 1048(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 632(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 592(%esp), %ecx
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 604(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 624(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 628(%esp), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 544(%esp), %esi
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 548(%esp), %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 552(%esp), %esi
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 556(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 536(%esp), %edx
+ addl 496(%esp), %edi
+ adcl 500(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 504(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 528(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 448(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 448(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 456(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 464(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 480(%esp), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %esi # 4-byte Reload
+ adcl 488(%esp), %esi
+ movl 1048(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 400(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 440(%esp), %eax
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 400(%esp), %ecx
+ adcl 404(%esp), %ebp
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 408(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 412(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 416(%esp), %edi
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 420(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 424(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 428(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 432(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 436(%esp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 352(%esp), %esi
+ adcl 356(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 360(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 368(%esp), %esi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 372(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1044(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ movl 344(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 304(%esp), %ecx
+ adcl 308(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 316(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 320(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 324(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 256(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 272(%esp), %edi
+ adcl 276(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 284(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %esi # 4-byte Reload
+ adcl 288(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 248(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 208(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 220(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 224(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 236(%esp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 160(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ adcl 176(%esp), %edi
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 180(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 192(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 152(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 112(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 124(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 128(%esp), %esi
+ movl %esi, %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 140(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 144(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %ebp
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 64(%esp), %ebp
+ movl %edi, %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 68(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebx
+ adcl 80(%esp), %ebp
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 96(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
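+# Trial-subtract the modulus; the sign of the top word of the difference (sarl/js
+# below) decides whether t or t - p is written to the output.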
+ movl %eax, %edx
+ movl 1052(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %ecx
+ movl %ebx, %eax
+ sbbl 8(%edi), %eax
+ movl %ebp, %esi
+ sbbl 12(%edi), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 16(%edi), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 24(%esp), %esi # 4-byte Reload
+ sbbl 20(%edi), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ sbbl 24(%edi), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 28(%edi), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 32(%edi), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 36(%edi), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %esi, %edi
+ sarl $31, %edi
+ testl %edi, %edi
+ movl 60(%esp), %edi # 4-byte Reload
+ js .LBB147_2
+# BB#1:
+ movl %edx, %edi
+.LBB147_2:
+ movl 1040(%esp), %edx
+ movl %edi, (%edx)
+ movl 52(%esp), %edi # 4-byte Reload
+ js .LBB147_4
+# BB#3:
+ movl %ecx, %edi
+.LBB147_4:
+ movl %edi, 4(%edx)
+ js .LBB147_6
+# BB#5:
+ movl %eax, %ebx
+.LBB147_6:
+ movl %ebx, 8(%edx)
+ js .LBB147_8
+# BB#7:
+ movl 4(%esp), %ebp # 4-byte Reload
+.LBB147_8:
+ movl %ebp, 12(%edx)
+ movl 44(%esp), %esi # 4-byte Reload
+ movl 24(%esp), %eax # 4-byte Reload
+ js .LBB147_10
+# BB#9:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB147_10:
+ movl %esi, 16(%edx)
+ js .LBB147_12
+# BB#11:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB147_12:
+ movl %eax, 20(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB147_14
+# BB#13:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB147_14:
+ movl %eax, 24(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB147_16
+# BB#15:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB147_16:
+ movl %eax, 28(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB147_18
+# BB#17:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB147_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB147_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB147_20:
+ movl %eax, 36(%edx)
+ addl $1020, %esp # imm = 0x3FC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end147:
+ .size mcl_fp_montNF10Lbmi2, .Lfunc_end147-mcl_fp_montNF10Lbmi2
+
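+# mcl_fp_montRed10Lbmi2(z, xy, p): Montgomery reduction of a 20-limb input to the
+# 10-limb result z = xy * R^-1 mod p (R = 2^320), one .LmulPv320x32 call per limb.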
+ .globl mcl_fp_montRed10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed10Lbmi2,@function
+mcl_fp_montRed10Lbmi2: # @mcl_fp_montRed10Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $604, %esp # imm = 0x25C
+ calll .L148$pb
+.L148$pb:
+ popl %eax
+.Ltmp19:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp19-.L148$pb), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 632(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 628(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
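+# First reduction step: q0 = xy[0] * n'; adding q0 * p clears the bottom limb, and
+# the same pattern repeats for each of the ten limbs.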
+ imull %esi, %ebx
+ movl 76(%ecx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 56(%ecx), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 48(%ecx), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 44(%ecx), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 40(%ecx), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 28(%ecx), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 24(%ecx), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 20(%ecx), %ebp
+ movl 16(%ecx), %edi
+ movl 12(%ecx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 8(%ecx), %esi
+ movl (%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 560(%esp), %ecx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 560(%esp), %eax
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 564(%esp), %ecx
+ adcl 568(%esp), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 576(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 580(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 68(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 512(%esp), %esi
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 516(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 520(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 524(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 528(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 532(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 536(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 540(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 544(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 548(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 552(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 464(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 492(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 416(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 440(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ movl 60(%esp), %edi # 4-byte Reload
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 632(%esp), %eax
+ movl %eax, %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 368(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 404(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 320(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 320(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %edi # 4-byte Reload
+ adcl 344(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 352(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 360(%esp), %esi
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 272(%esp), %ebp
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 276(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 288(%esp), %ebp
+ adcl 292(%esp), %edi
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 296(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 308(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 312(%esp), %esi
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 96(%esp), %eax # 4-byte Reload
+ addl 224(%esp), %eax
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 232(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 236(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 240(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl 244(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 248(%esp), %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 256(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 260(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 264(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %eax, %edi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 176(%esp), %edi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 180(%esp), %ecx
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl 184(%esp), %edi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 196(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 128(%esp), %esi
+ movl %edi, %eax
+ adcl 132(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 140(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ adcl 144(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 76(%esp), %ebx # 4-byte Reload
+ adcl 164(%esp), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ subl 12(%esp), %edi # 4-byte Folded Reload
+ sbbl 8(%esp), %ecx # 4-byte Folded Reload
+ sbbl 16(%esp), %esi # 4-byte Folded Reload
+ sbbl 20(%esp), %edx # 4-byte Folded Reload
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ sbbl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx # 4-byte Reload
+ sbbl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 96(%esp) # 4-byte Spill
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB148_2
+# BB#1:
+ movl %edx, 80(%esp) # 4-byte Spill
+.LBB148_2:
+ testb %al, %al
+ movl 112(%esp), %edx # 4-byte Reload
+ jne .LBB148_4
+# BB#3:
+ movl %edi, %edx
+.LBB148_4:
+ movl 624(%esp), %edi
+ movl %edx, (%edi)
+ movl 108(%esp), %edx # 4-byte Reload
+ jne .LBB148_6
+# BB#5:
+ movl %ecx, 124(%esp) # 4-byte Spill
+.LBB148_6:
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%edi)
+ movl 116(%esp), %ecx # 4-byte Reload
+ jne .LBB148_8
+# BB#7:
+ movl %esi, %ecx
+.LBB148_8:
+ movl %ecx, 8(%edi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edi)
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl 120(%esp), %eax # 4-byte Reload
+ jne .LBB148_10
+# BB#9:
+ movl 64(%esp), %eax # 4-byte Reload
+.LBB148_10:
+ movl %eax, 16(%edi)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl 104(%esp), %ebp # 4-byte Reload
+ jne .LBB148_12
+# BB#11:
+ movl 68(%esp), %ebp # 4-byte Reload
+.LBB148_12:
+ movl %ebp, 20(%edi)
+ movl 88(%esp), %ebx # 4-byte Reload
+ jne .LBB148_14
+# BB#13:
+ movl 72(%esp), %ebx # 4-byte Reload
+.LBB148_14:
+ movl %ebx, 24(%edi)
+ jne .LBB148_16
+# BB#15:
+ movl 92(%esp), %edx # 4-byte Reload
+.LBB148_16:
+ movl %edx, 28(%edi)
+ jne .LBB148_18
+# BB#17:
+ movl 100(%esp), %ecx # 4-byte Reload
+.LBB148_18:
+ movl %ecx, 32(%edi)
+ jne .LBB148_20
+# BB#19:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB148_20:
+ movl %eax, 36(%edi)
+ addl $604, %esp # imm = 0x25C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end148:
+ .size mcl_fp_montRed10Lbmi2, .Lfunc_end148-mcl_fp_montRed10Lbmi2
+
+ .globl mcl_fp_addPre10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre10Lbmi2,@function
+mcl_fp_addPre10Lbmi2: # @mcl_fp_addPre10Lbmi2
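+# Adds two 10-limb (320-bit) operands limb by limb without modular reduction;
+# stores the 10-limb sum and returns the final carry (0 or 1) in %eax.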
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl %edx, 28(%ebx)
+ movl %esi, 32(%ebx)
+ movl 36(%eax), %eax
+ movl 36(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 36(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end149:
+ .size mcl_fp_addPre10Lbmi2, .Lfunc_end149-mcl_fp_addPre10Lbmi2
+
+ .globl mcl_fp_subPre10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre10Lbmi2,@function
+mcl_fp_subPre10Lbmi2: # @mcl_fp_subPre10Lbmi2
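+# Subtracts two 10-limb (320-bit) operands without modular reduction;
+# stores the 10-limb difference and returns the final borrow (0 or 1) in %eax.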
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl %esi, 28(%ebp)
+ movl %edi, 32(%ebp)
+ movl 36(%edx), %edx
+ movl 36(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 36(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end150:
+ .size mcl_fp_subPre10Lbmi2, .Lfunc_end150-mcl_fp_subPre10Lbmi2
+
+ .globl mcl_fp_shr1_10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_10Lbmi2,@function
+mcl_fp_shr1_10Lbmi2: # @mcl_fp_shr1_10Lbmi2
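+# Shifts a 10-limb (320-bit) value right by one bit, using a shrdl chain
+# to pull each limb's low bit into the next lower limb.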
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 24(%esi)
+ movl 32(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 28(%esi)
+ movl 36(%eax), %eax
+ shrdl $1, %eax, %ecx
+ movl %ecx, 32(%esi)
+ shrl %eax
+ movl %eax, 36(%esi)
+ popl %esi
+ retl
+.Lfunc_end151:
+ .size mcl_fp_shr1_10Lbmi2, .Lfunc_end151-mcl_fp_shr1_10Lbmi2
+
+ .globl mcl_fp_add10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add10Lbmi2,@function
+mcl_fp_add10Lbmi2: # @mcl_fp_add10Lbmi2
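+# Modular addition: computes x + y, stores the raw sum, then subtracts the
+# modulus p; if that subtraction does not borrow, the reduced sum overwrites
+# the stored result (the %nocarry path below).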
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 52(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 48(%esp), %ebx
+ addl (%ebx), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl 4(%ebx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %eax
+ adcl 12(%edi), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 16(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ adcl 20(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%ebx), %esi
+ adcl 24(%edi), %esi
+ movl 28(%ebx), %ebp
+ adcl 28(%edi), %ebp
+ movl 32(%ebx), %edx
+ adcl 32(%edi), %edx
+ movl 36(%ebx), %ecx
+ adcl 36(%edi), %ecx
+ movl 44(%esp), %edi
+ movl (%esp), %ebx # 4-byte Reload
+ movl %ebx, (%edi)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%edi)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edi)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%edi)
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%edi)
+ movl %esi, 24(%edi)
+ movl %ebp, 28(%edi)
+ movl %edx, 32(%edi)
+ movl %ecx, 36(%edi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 56(%esp), %edi
+ subl (%edi), %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl 20(%esp), %ebx # 4-byte Reload
+ sbbl 4(%edi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %ebx # 4-byte Reload
+ sbbl 8(%edi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ sbbl 12(%edi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebx # 4-byte Reload
+ sbbl 16(%edi), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 4(%esp), %ebx # 4-byte Reload
+ sbbl 20(%edi), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ sbbl 24(%edi), %esi
+ sbbl 28(%edi), %ebp
+ sbbl 32(%edi), %edx
+ sbbl 36(%edi), %ecx
+ sbbl $0, %eax
+ testb $1, %al
+ jne .LBB152_2
+# BB#1: # %nocarry
+ movl (%esp), %edi # 4-byte Reload
+ movl 44(%esp), %ebx
+ movl %edi, (%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebx)
+ movl %esi, 24(%ebx)
+ movl %ebp, 28(%ebx)
+ movl %edx, 32(%ebx)
+ movl %ecx, 36(%ebx)
+.LBB152_2: # %carry
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end152:
+ .size mcl_fp_add10Lbmi2, .Lfunc_end152-mcl_fp_add10Lbmi2
+
+ .globl mcl_fp_addNF10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF10Lbmi2,@function
+mcl_fp_addNF10Lbmi2: # @mcl_fp_addNF10Lbmi2
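+# Addition variant that computes both x + y and x + y - p, then selects
+# between them per limb based on the sign of the top word of the difference
+# rather than on a carry flag.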
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 100(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %esi
+ movl 96(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 4(%edx), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 36(%ecx), %edi
+ movl 32(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 20(%ecx), %ebp
+ movl 16(%ecx), %ebx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %esi
+ adcl 8(%edx), %esi
+ adcl 12(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 16(%edx), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 20(%edx), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 24(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 28(%edx), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 32(%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ adcl 36(%edx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 104(%esp), %edi
+ movl 52(%esp), %edx # 4-byte Reload
+ subl (%edi), %edx
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 4(%edi), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl %ecx, %esi
+ sbbl 8(%edi), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ sbbl 20(%edi), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ movl %esi, %eax
+ movl %esi, %ebp
+ sbbl 24(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ movl %esi, %eax
+ movl %esi, %ebx
+ sbbl 32(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, %esi
+ sbbl 36(%edi), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %esi, %edi
+ movl 52(%esp), %esi # 4-byte Reload
+ sarl $31, %edi
+ testl %edi, %edi
+ js .LBB153_2
+# BB#1:
+ movl %edx, %esi
+.LBB153_2:
+ movl 92(%esp), %edx
+ movl %esi, (%edx)
+ movl 56(%esp), %esi # 4-byte Reload
+ js .LBB153_4
+# BB#3:
+ movl (%esp), %esi # 4-byte Reload
+.LBB153_4:
+ movl %esi, 4(%edx)
+ movl %ebp, %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ js .LBB153_6
+# BB#5:
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB153_6:
+ movl %ecx, 8(%edx)
+ movl %ebx, %ecx
+ movl 44(%esp), %ebp # 4-byte Reload
+ js .LBB153_8
+# BB#7:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB153_8:
+ movl %esi, 12(%edx)
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 48(%esp), %ebx # 4-byte Reload
+ js .LBB153_10
+# BB#9:
+ movl 12(%esp), %ebp # 4-byte Reload
+.LBB153_10:
+ movl %ebp, 16(%edx)
+ js .LBB153_12
+# BB#11:
+ movl 16(%esp), %ebx # 4-byte Reload
+.LBB153_12:
+ movl %ebx, 20(%edx)
+ js .LBB153_14
+# BB#13:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB153_14:
+ movl %edi, 24(%edx)
+ js .LBB153_16
+# BB#15:
+ movl 24(%esp), %esi # 4-byte Reload
+.LBB153_16:
+ movl %esi, 28(%edx)
+ js .LBB153_18
+# BB#17:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB153_18:
+ movl %ecx, 32(%edx)
+ js .LBB153_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB153_20:
+ movl %eax, 36(%edx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end153:
+ .size mcl_fp_addNF10Lbmi2, .Lfunc_end153-mcl_fp_addNF10Lbmi2
+
+ .globl mcl_fp_sub10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub10Lbmi2,@function
+mcl_fp_sub10Lbmi2: # @mcl_fp_sub10Lbmi2
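+# Modular subtraction: computes x - y, stores the raw difference, and if a
+# borrow occurred adds the modulus p back in (the %carry path below).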
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ xorl %ebx, %ebx
+ movl 60(%esp), %edi
+ subl (%edi), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 20(%esi), %edx
+ sbbl 20(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 24(%esi), %ecx
+ sbbl 24(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 32(%esi), %ebp
+ sbbl 32(%edi), %ebp
+ movl 36(%esi), %esi
+ sbbl 36(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 52(%esp), %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl %edx, 20(%ebx)
+ movl %ecx, 24(%ebx)
+ movl %eax, 28(%ebx)
+ movl %ebp, 32(%ebx)
+ movl %esi, 36(%ebx)
+ je .LBB154_2
+# BB#1: # %carry
+ movl %esi, %edi
+ movl 64(%esp), %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl %eax, 28(%ebx)
+ movl 32(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 32(%ebx)
+ movl 36(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 36(%ebx)
+.LBB154_2: # %nocarry
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end154:
+ .size mcl_fp_sub10Lbmi2, .Lfunc_end154-mcl_fp_sub10Lbmi2
+
+ .globl mcl_fp_subNF10Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF10Lbmi2,@function
+mcl_fp_subNF10Lbmi2: # @mcl_fp_subNF10Lbmi2
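+# Subtraction variant that adds back the modulus masked by the sign of the
+# borrow, producing the reduced result without a conditional branch.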
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 80(%esp), %eax
+ movl 36(%eax), %esi
+ movl (%eax), %edi
+ movl 4(%eax), %edx
+ movl 84(%esp), %ecx
+ subl (%ecx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ sbbl 4(%ecx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 32(%eax), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl 24(%eax), %ebx
+ movl 20(%eax), %ebp
+ movl 16(%eax), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 12(%eax), %edx
+ movl 8(%eax), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 16(%ecx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ sbbl 24(%ecx), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ sbbl 28(%ecx), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ sbbl 32(%ecx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ sbbl 36(%ecx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ sarl $31, %eax
+ movl %eax, %edx
+ addl %edx, %edx
+ movl %eax, %ecx
+ adcl %ecx, %ecx
+ movl %esi, %ebx
+ shrl $31, %ebx
+ orl %edx, %ebx
+ movl 88(%esp), %edi
+ movl 20(%edi), %edx
+ andl %ecx, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 12(%edi), %edx
+ andl %ecx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ andl 4(%edi), %ecx
+ movl 16(%edi), %edx
+ andl %ebx, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 8(%edi), %edx
+ andl %ebx, %edx
+ andl (%edi), %ebx
+ movl 36(%edi), %esi
+ andl %eax, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 32(%edi), %ebp
+ andl %eax, %ebp
+ movl 28(%edi), %esi
+ andl %eax, %esi
+ andl 24(%edi), %eax
+ addl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %edi
+ movl %ebx, (%edi)
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 4(%edi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 8(%edi)
+ movl (%esp), %edx # 4-byte Reload
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 12(%edi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 16(%edi)
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 20(%edi)
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 24(%edi)
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %esi, 28(%edi)
+ movl %ebp, 32(%edi)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%edi)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end155:
+ .size mcl_fp_subNF10Lbmi2, .Lfunc_end155-mcl_fp_subNF10Lbmi2
+
+ .globl mcl_fpDbl_add10Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add10Lbmi2,@function
+mcl_fpDbl_add10Lbmi2: # @mcl_fpDbl_add10Lbmi2
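+# Double-width (20-limb) addition: the low 10 limbs of the sum are stored
+# directly, and the high 10 limbs are reduced by a conditional subtraction
+# of the modulus.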
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 100(%esp), %edx
+ movl 96(%esp), %edi
+ movl 12(%edi), %esi
+ movl 16(%edi), %ecx
+ movl 8(%edx), %ebx
+ movl (%edx), %ebp
+ addl (%edi), %ebp
+ movl 92(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%edx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%edx), %esi
+ adcl 16(%edx), %ecx
+ movl %ebp, 4(%eax)
+ movl 48(%edx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %ecx, 16(%eax)
+ movl 24(%edi), %ecx
+ adcl %ebx, %ecx
+ movl 28(%edx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %ecx, 24(%eax)
+ movl 32(%edi), %ecx
+ adcl %ebx, %ecx
+ movl 36(%edx), %ebx
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %ecx, 32(%eax)
+ movl 40(%edi), %ecx
+ adcl %ebx, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 44(%edx), %ebx
+ movl %esi, 36(%eax)
+ movl 44(%edi), %eax
+ adcl %ebx, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 48(%edi), %eax
+ adcl %ebp, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ movl 52(%edi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 56(%edx), %eax
+ movl 56(%edi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 60(%edx), %eax
+ movl 60(%edi), %ecx
+ adcl %eax, %ecx
+ movl 64(%edx), %esi
+ movl 64(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 68(%edx), %ebx
+ movl 68(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 72(%edx), %ebx
+ movl 72(%edi), %ebp
+ adcl %ebx, %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 76(%edx), %edx
+ movl 76(%edi), %edi
+ adcl %edx, %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 104(%esp), %ebx
+ movl 64(%esp), %edi # 4-byte Reload
+ subl (%ebx), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebx), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebx), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ sbbl 20(%ebx), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 24(%ebx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ sbbl 28(%ebx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl 36(%esp), %ebp # 4-byte Reload
+ sbbl 32(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ sbbl 36(%ebx), %edi
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB156_2
+# BB#1:
+ movl %edi, %ebp
+.LBB156_2:
+ testb %dl, %dl
+ movl 64(%esp), %edx # 4-byte Reload
+ movl 60(%esp), %esi # 4-byte Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ jne .LBB156_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB156_4:
+ movl 92(%esp), %eax
+ movl %edx, 40(%eax)
+ movl 68(%esp), %edx # 4-byte Reload
+ movl %edx, 44(%eax)
+ movl %ebx, 48(%eax)
+ movl %edi, 52(%eax)
+ movl %esi, 56(%eax)
+ movl %ecx, 60(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB156_6
+# BB#5:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB156_6:
+ movl %ecx, 64(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB156_8
+# BB#7:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB156_8:
+ movl %edx, 68(%eax)
+ jne .LBB156_10
+# BB#9:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB156_10:
+ movl %ecx, 72(%eax)
+ movl %ebp, 76(%eax)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end156:
+ .size mcl_fpDbl_add10Lbmi2, .Lfunc_end156-mcl_fpDbl_add10Lbmi2
+
+ .globl mcl_fpDbl_sub10Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub10Lbmi2,@function
+mcl_fpDbl_sub10Lbmi2: # @mcl_fpDbl_sub10Lbmi2
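+# Double-width (20-limb) subtraction: the low 10 limbs are stored directly,
+# and the modulus is added back into the high 10 limbs when the subtraction
+# borrowed.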
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %ebp
+ movl (%ebp), %edx
+ movl 4(%ebp), %esi
+ movl 88(%esp), %eax
+ subl (%eax), %edx
+ sbbl 4(%eax), %esi
+ movl 8(%ebp), %edi
+ sbbl 8(%eax), %edi
+ movl 80(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 12(%ebp), %edx
+ sbbl 12(%eax), %edx
+ movl %esi, 4(%ecx)
+ movl 16(%ebp), %esi
+ sbbl 16(%eax), %esi
+ movl %edi, 8(%ecx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ecx)
+ movl 20(%ebp), %edx
+ sbbl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ecx)
+ movl 24(%ebp), %esi
+ sbbl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ecx)
+ movl 28(%ebp), %edx
+ sbbl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ecx)
+ movl 32(%ebp), %esi
+ sbbl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ecx)
+ movl 36(%ebp), %edx
+ sbbl %edi, %edx
+ movl 40(%eax), %edi
+ movl %esi, 32(%ecx)
+ movl 40(%ebp), %esi
+ sbbl %edi, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 44(%eax), %esi
+ movl %edx, 36(%ecx)
+ movl 44(%ebp), %edx
+ sbbl %esi, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%eax), %edx
+ movl 48(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 52(%eax), %edx
+ movl 52(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 56(%eax), %edx
+ movl 56(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 60(%eax), %edx
+ movl 60(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 64(%eax), %edx
+ movl 64(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 68(%eax), %edx
+ movl 68(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 72(%eax), %edx
+ movl 72(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 76(%eax), %eax
+ movl 76(%ebp), %edx
+ sbbl %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 92(%esp), %esi
+ jne .LBB157_1
+# BB#2:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB157_3
+.LBB157_1:
+ movl 36(%esi), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+.LBB157_3:
+ testb %al, %al
+ jne .LBB157_4
+# BB#5:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ movl $0, %ebx
+ jmp .LBB157_6
+.LBB157_4:
+ movl (%esi), %ebx
+ movl 4(%esi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB157_6:
+ jne .LBB157_7
+# BB#8:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB157_9
+.LBB157_7:
+ movl 32(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB157_9:
+ jne .LBB157_10
+# BB#11:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB157_12
+.LBB157_10:
+ movl 28(%esi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB157_12:
+ jne .LBB157_13
+# BB#14:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB157_15
+.LBB157_13:
+ movl 24(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB157_15:
+ jne .LBB157_16
+# BB#17:
+ movl $0, %ebp
+ jmp .LBB157_18
+.LBB157_16:
+ movl 20(%esi), %ebp
+.LBB157_18:
+ jne .LBB157_19
+# BB#20:
+ movl $0, %eax
+ jmp .LBB157_21
+.LBB157_19:
+ movl 16(%esi), %eax
+.LBB157_21:
+ jne .LBB157_22
+# BB#23:
+ movl $0, %edx
+ jmp .LBB157_24
+.LBB157_22:
+ movl 12(%esi), %edx
+.LBB157_24:
+ jne .LBB157_25
+# BB#26:
+ xorl %esi, %esi
+ jmp .LBB157_27
+.LBB157_25:
+ movl 8(%esi), %esi
+.LBB157_27:
+ addl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 40(%ecx)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 44(%ecx)
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 52(%ecx)
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 56(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 60(%ecx)
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 68(%ecx)
+ movl %eax, 72(%ecx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%ecx)
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end157:
+ .size mcl_fpDbl_sub10Lbmi2, .Lfunc_end157-mcl_fpDbl_sub10Lbmi2
+
+ .align 16, 0x90
+ .type .LmulPv352x32,@function
+.LmulPv352x32: # @mulPv352x32
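+# Local helper: multiplies an 11-limb (352-bit) operand by a 32-bit scalar
+# passed on the stack, using BMI2 mulx, and writes the 12-limb product to (%ecx).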
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl %edx, %eax
+ movl 52(%esp), %edx
+ mulxl 4(%eax), %ebx, %esi
+ mulxl (%eax), %edi, %ebp
+ movl %edi, 28(%esp) # 4-byte Spill
+ addl %ebx, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ mulxl 8(%eax), %edi, %ebx
+ adcl %esi, %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ mulxl 12(%eax), %esi, %edi
+ adcl %ebx, %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ mulxl 16(%eax), %esi, %ebx
+ adcl %edi, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ mulxl 20(%eax), %esi, %edi
+ adcl %ebx, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ mulxl 24(%eax), %ebx, %esi
+ adcl %edi, %ebx
+ mulxl 28(%eax), %edi, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ mulxl 32(%eax), %esi, %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ mulxl 36(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 28(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%ecx)
+ movl %ebx, 24(%ecx)
+ movl %edi, 28(%ecx)
+ movl %esi, 32(%ecx)
+ movl %edx, 36(%ecx)
+ movl 52(%esp), %edx
+ mulxl 40(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ adcl $0, %edx
+ movl %edx, 44(%ecx)
+ movl %ecx, %eax
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end158:
+ .size .LmulPv352x32, .Lfunc_end158-.LmulPv352x32
+
+ .globl mcl_fp_mulUnitPre11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre11Lbmi2,@function
+mcl_fp_mulUnitPre11Lbmi2: # @mcl_fp_mulUnitPre11Lbmi2
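+# Multiplies an 11-limb (352-bit) operand by a 32-bit scalar via .LmulPv352x32
+# and copies the 12-limb result to the destination.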
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+ calll .L159$pb
+.L159$pb:
+ popl %ebx
+.Ltmp20:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp20-.L159$pb), %ebx
+ movl 120(%esp), %eax
+ movl %eax, (%esp)
+ leal 40(%esp), %ecx
+ movl 116(%esp), %edx
+ calll .LmulPv352x32
+ movl 84(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp
+ movl 56(%esp), %ebx
+ movl 52(%esp), %edi
+ movl 48(%esp), %esi
+ movl 40(%esp), %edx
+ movl 44(%esp), %ecx
+ movl 112(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end159:
+ .size mcl_fp_mulUnitPre11Lbmi2, .Lfunc_end159-mcl_fp_mulUnitPre11Lbmi2
+
+ .globl mcl_fpDbl_mulPre11Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre11Lbmi2,@function
+mcl_fpDbl_mulPre11Lbmi2: # @mcl_fpDbl_mulPre11Lbmi2
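+# Schoolbook 11 x 11-limb multiplication: one .LmulPv352x32 call per word of
+# the second operand, accumulating the partial products into a 22-limb result.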
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $620, %esp # imm = 0x26C
+ calll .L160$pb
+.L160$pb:
+ popl %eax
+.Ltmp21:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp21-.L160$pb), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 648(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 644(%esp), %edx
+ movl %edx, %ebp
+ movl %ebx, %edi
+ calll .LmulPv352x32
+ movl 612(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 584(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 580(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 568(%esp), %eax
+ movl 572(%esp), %esi
+ movl 640(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 648(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 520(%esp), %ecx
+ movl %ebp, %edx
+ movl %edi, %ebx
+ calll .LmulPv352x32
+ addl 520(%esp), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 540(%esp), %ebx
+ movl 536(%esp), %edi
+ movl 532(%esp), %esi
+ movl 524(%esp), %ecx
+ movl 528(%esp), %edx
+ movl 640(%esp), %eax
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 472(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 516(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 508(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 504(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 492(%esp), %ebp
+ movl 488(%esp), %edi
+ movl 484(%esp), %esi
+ movl 476(%esp), %ecx
+ movl 480(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 424(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 468(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 464(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 460(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 444(%esp), %ebx
+ movl 440(%esp), %edi
+ movl 436(%esp), %esi
+ movl 428(%esp), %ecx
+ movl 432(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 376(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 396(%esp), %ebp
+ movl 392(%esp), %edi
+ movl 388(%esp), %esi
+ movl 380(%esp), %ecx
+ movl 384(%esp), %edx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 328(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 348(%esp), %ebx
+ movl 344(%esp), %edi
+ movl 340(%esp), %esi
+ movl 332(%esp), %ecx
+ movl 336(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 280(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 300(%esp), %ebp
+ movl 296(%esp), %edi
+ movl 292(%esp), %esi
+ movl 284(%esp), %ecx
+ movl 288(%esp), %edx
+ movl 640(%esp), %eax
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 232(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 252(%esp), %ebx
+ movl 248(%esp), %edi
+ movl 244(%esp), %esi
+ movl 236(%esp), %ecx
+ movl 240(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 184(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 204(%esp), %ebp
+ movl 200(%esp), %edi
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 640(%esp), %eax
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %edi
+ movl 36(%edi), %eax
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 644(%esp), %eax
+ movl %eax, %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 136(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 164(%esp), %ebp
+ movl 160(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 156(%esp), %edi
+ movl 152(%esp), %esi
+ movl 148(%esp), %edx
+ movl 140(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 36(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 88(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 92(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 112(%esp), %edi
+ movl 108(%esp), %esi
+ movl 104(%esp), %edx
+ movl 100(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 40(%eax)
+ movl %ebp, 44(%eax)
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%eax)
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 60(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 64(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ movl %ecx, 76(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ addl $620, %esp # imm = 0x26C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end160:
+ .size mcl_fpDbl_mulPre11Lbmi2, .Lfunc_end160-mcl_fpDbl_mulPre11Lbmi2
+
+ .globl mcl_fpDbl_sqrPre11Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre11Lbmi2,@function
+mcl_fpDbl_sqrPre11Lbmi2: # @mcl_fpDbl_sqrPre11Lbmi2
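+# Squares an 11-limb (352-bit) operand with the same row-by-row .LmulPv352x32
+# accumulation as mcl_fpDbl_mulPre11Lbmi2, both operand pointers referring to
+# the same input.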
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $620, %esp # imm = 0x26C
+ calll .L161$pb
+.L161$pb:
+ popl %ebx
+.Ltmp22:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp22-.L161$pb), %ebx
+ movl %ebx, 84(%esp) # 4-byte Spill
+ movl 644(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl %edx, %esi
+ movl %ebx, %edi
+ calll .LmulPv352x32
+ movl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 584(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 580(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 568(%esp), %eax
+ movl 572(%esp), %ebp
+ movl 640(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %esi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 520(%esp), %ecx
+ movl %edi, %ebx
+ calll .LmulPv352x32
+ addl 520(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 540(%esp), %ebx
+ movl 536(%esp), %edi
+ movl 532(%esp), %esi
+ movl 524(%esp), %ecx
+ movl 528(%esp), %edx
+ movl 640(%esp), %eax
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 472(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 516(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 508(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 504(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 492(%esp), %ebp
+ movl 488(%esp), %edi
+ movl 484(%esp), %esi
+ movl 476(%esp), %ecx
+ movl 480(%esp), %edx
+ movl 640(%esp), %eax
+ movl 60(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 48(%esp), %eax # 4-byte Reload
+ addl 424(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 468(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 464(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 460(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 448(%esp), %ebx
+ movl 444(%esp), %edi
+ movl 440(%esp), %esi
+ movl 436(%esp), %edx
+ movl 428(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 432(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 80(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 80(%esp), %eax # 4-byte Reload
+ addl 376(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 404(%esp), %ebx
+ movl 400(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 396(%esp), %edi
+ movl 392(%esp), %esi
+ movl 388(%esp), %edx
+ movl 380(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 384(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 80(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 328(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 348(%esp), %ebp
+ movl 344(%esp), %edi
+ movl 340(%esp), %esi
+ movl 332(%esp), %ecx
+ movl 336(%esp), %edx
+ movl 640(%esp), %eax
+ movl 48(%esp), %ebx # 4-byte Reload
+ movl %ebx, 20(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 48(%esp), %eax # 4-byte Reload
+ addl 280(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 304(%esp), %ebx
+ movl 300(%esp), %edi
+ movl 296(%esp), %esi
+ movl 292(%esp), %edx
+ movl 284(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 288(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 80(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 80(%esp), %eax # 4-byte Reload
+ addl 232(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 260(%esp), %ebx
+ movl 256(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 252(%esp), %edi
+ movl 248(%esp), %esi
+ movl 244(%esp), %edx
+ movl 236(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 240(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 80(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 184(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 204(%esp), %ebp
+ movl 200(%esp), %edi
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 136(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 164(%esp), %ebp
+ movl 160(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 156(%esp), %edi
+ movl 152(%esp), %esi
+ movl 148(%esp), %edx
+ movl 140(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 36(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 88(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 92(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 112(%esp), %edi
+ movl 108(%esp), %esi
+ movl 104(%esp), %edx
+ movl 100(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 40(%eax)
+ movl %ebp, 44(%eax)
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%eax)
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 60(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 64(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ movl %ecx, 76(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ addl $620, %esp # imm = 0x26C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end161:
+ .size mcl_fpDbl_sqrPre11Lbmi2, .Lfunc_end161-mcl_fpDbl_sqrPre11Lbmi2
+
+ .globl mcl_fp_mont11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont11Lbmi2,@function
+mcl_fp_mont11Lbmi2: # @mcl_fp_mont11Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1132, %esp # imm = 0x46C
+ calll .L162$pb
+.L162$pb:
+ popl %ebx
+.Ltmp23:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp23-.L162$pb), %ebx
+ movl 1164(%esp), %eax
+ movl -4(%eax), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1080(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 1080(%esp), %edi
+ movl 1084(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %ebp, %eax
+ movl 1124(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1120(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1116(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1112(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1108(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 1100(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 1096(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1092(%esp), %esi
+ movl 1088(%esp), %ebp
+ movl %eax, (%esp)
+ leal 1032(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 1032(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1040(%esp), %ebp
+ adcl 1044(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1076(%esp), %esi
+ sbbl %edi, %edi
+ movl 1160(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 984(%esp), %ecx
+ adcl 988(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1024(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 1028(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 936(%esp), %esi
+ adcl 940(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 964(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 980(%esp), %esi
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ addl 888(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 912(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 928(%esp), %esi
+ movl %esi, %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ebp, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 840(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ movl %esi, %eax
+ andl $1, %eax
+ addl 840(%esp), %ebp
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 844(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 848(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 852(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 856(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 860(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 864(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 868(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 872(%esp), %ebp
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 876(%esp), %esi
+ adcl 880(%esp), %edi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 884(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 792(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 792(%esp), %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 820(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl 824(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 828(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 836(%esp), %esi
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 744(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 744(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 776(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 788(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1156(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 696(%esp), %ecx
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 716(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 724(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 728(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 648(%esp), %ebp
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 676(%esp), %edi
+ adcl 680(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 692(%esp), %esi
+ adcl $0, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 600(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 24(%esp), %ecx # 4-byte Reload
+ addl 600(%esp), %ecx
+ adcl 604(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 608(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 624(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 640(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %esi
+ movl %esi, %eax
+ addl 552(%esp), %edi
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ adcl 560(%esp), %edi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 564(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 568(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 572(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 576(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 580(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 584(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 588(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 592(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 596(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 28(%esp), %ecx # 4-byte Reload
+ addl 504(%esp), %ecx
+ adcl 508(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 520(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 532(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 536(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 456(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 456(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 460(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 464(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 484(%esp), %edi
+ adcl 488(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 496(%esp), %esi
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 408(%esp), %ecx
+ adcl 412(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 428(%esp), %ebp
+ adcl 432(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 444(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 360(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 368(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 380(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 384(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 312(%esp), %ecx
+ adcl 316(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 332(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 340(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 348(%esp), %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 264(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 276(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 284(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 300(%esp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 304(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 216(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 224(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 232(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 252(%esp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ movl %esi, %ecx
+ andl $1, %ecx
+ addl 168(%esp), %ebp
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 172(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 184(%esp), %ebp
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 188(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl %esi, %ecx
+ addl 120(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 136(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 156(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 20(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ andl $1, %esi
+ addl 72(%esp), %edi
+ movl 48(%esp), %edi # 4-byte Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 88(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl 1164(%esp), %ebp
+ subl (%ebp), %eax
+ movl %ecx, %edx
+ sbbl 4(%ebp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ sbbl 12(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 28(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ebp), %ebx
+ movl 32(%esp), %edi # 4-byte Reload
+ sbbl 32(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ sbbl 36(%ebp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 40(%ebp), %edi
+ movl %edi, %ebp
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB162_2
+# BB#1:
+ movl %ebx, 28(%esp) # 4-byte Spill
+.LBB162_2:
+ movl %esi, %ebx
+ testb %bl, %bl
+ movl 68(%esp), %ebx # 4-byte Reload
+ jne .LBB162_4
+# BB#3:
+ movl %eax, %ebx
+.LBB162_4:
+ movl 1152(%esp), %eax
+ movl %ebx, (%eax)
+ movl 56(%esp), %edi # 4-byte Reload
+ jne .LBB162_6
+# BB#5:
+ movl %edx, %edi
+.LBB162_6:
+ movl %edi, 4(%eax)
+ movl 52(%esp), %edx # 4-byte Reload
+ jne .LBB162_8
+# BB#7:
+ movl %ecx, %edx
+.LBB162_8:
+ movl %edx, 8(%eax)
+ jne .LBB162_10
+# BB#9:
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+.LBB162_10:
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB162_12
+# BB#11:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB162_12:
+ movl %ecx, 16(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB162_14
+# BB#13:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB162_14:
+ movl %ecx, 20(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ jne .LBB162_16
+# BB#15:
+ movl 16(%esp), %ecx # 4-byte Reload
+.LBB162_16:
+ movl %ecx, 24(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ jne .LBB162_18
+# BB#17:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB162_18:
+ movl %ecx, 32(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB162_20
+# BB#19:
+ movl 60(%esp), %ecx # 4-byte Reload
+.LBB162_20:
+ movl %ecx, 36(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB162_22
+# BB#21:
+ movl %ebp, %ecx
+.LBB162_22:
+ movl %ecx, 40(%eax)
+ addl $1132, %esp # imm = 0x46C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end162:
+ .size mcl_fp_mont11Lbmi2, .Lfunc_end162-mcl_fp_mont11Lbmi2
+
+ .globl mcl_fp_montNF11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF11Lbmi2,@function
+mcl_fp_montNF11Lbmi2: # @mcl_fp_montNF11Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1132, %esp # imm = 0x46C
+ calll .L163$pb
+.L163$pb:
+ popl %ebx
+.Ltmp24:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp24-.L163$pb), %ebx
+ movl 1164(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1080(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 1080(%esp), %ebp
+ movl 1084(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 1124(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1120(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1116(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1112(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1108(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1100(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 1096(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1092(%esp), %esi
+ movl 1088(%esp), %edi
+ movl %eax, (%esp)
+ leal 1032(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 1032(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1040(%esp), %edi
+ adcl 1044(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 1048(%esp), %ebp
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 1052(%esp), %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 1028(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 984(%esp), %ecx
+ adcl 988(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 996(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 1000(%esp), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 1004(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ addl 936(%esp), %ebp
+ adcl 940(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 956(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 960(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 980(%esp), %ebp
+ movl 1160(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 932(%esp), %eax
+ addl 888(%esp), %edi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 892(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 896(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 900(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 904(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 908(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 912(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 916(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 920(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 924(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 928(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %edi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 840(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 840(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 860(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 872(%esp), %edi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 876(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 884(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 792(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 836(%esp), %eax
+ movl 44(%esp), %edx # 4-byte Reload
+ addl 792(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 796(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 800(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 804(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 808(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 812(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 816(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 820(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 824(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 828(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 832(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 744(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 744(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 764(%esp), %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 768(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 780(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 784(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 740(%esp), %edx
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 704(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 708(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 712(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 716(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 720(%esp), %edi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 724(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 728(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 732(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 736(%esp), %esi
+ adcl $0, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 648(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 672(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 688(%esp), %esi
+ movl %esi, %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 692(%esp), %esi
+ movl 1160(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 600(%esp), %ecx
+ movl 1156(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ movl 644(%esp), %eax
+ movl 28(%esp), %ecx # 4-byte Reload
+ addl 600(%esp), %ecx
+ adcl 604(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 608(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 612(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 616(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 620(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 624(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 628(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 632(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 636(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 640(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 552(%esp), %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 560(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 576(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 596(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 1160(%esp), %ecx
+ movl %ecx, %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 548(%esp), %edx
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 504(%esp), %eax
+ adcl 508(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 512(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 516(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 520(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 524(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 528(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 532(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 536(%esp), %edi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 540(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 544(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 456(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 456(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 468(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 480(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 488(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 496(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 452(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 408(%esp), %ecx
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 412(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 428(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 444(%esp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 448(%esp), %edi
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 360(%esp), %esi
+ adcl 364(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 372(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 400(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 356(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 312(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 320(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 332(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 340(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 264(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 276(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 284(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 292(%esp), %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 260(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 216(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 224(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 232(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 240(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 244(%esp), %ebp
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 168(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 176(%esp), %esi
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 180(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 196(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 204(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 164(%esp), %edx
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 120(%esp), %ecx
+ adcl 124(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 128(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 136(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 152(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 156(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 72(%esp), %edi
+ movl 48(%esp), %edi # 4-byte Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 84(%esp), %edi
+ adcl 88(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1164(%esp), %ebx
+ subl (%ebx), %edx
+ movl %ecx, %esi
+ sbbl 4(%ebx), %esi
+ movl %edi, %ecx
+ sbbl 8(%ebx), %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 12(%ebx), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ sbbl 16(%ebx), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ sbbl 20(%ebx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ sbbl 24(%ebx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ sbbl 28(%ebx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ sbbl 32(%ebx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ sbbl 36(%ebx), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ sbbl 40(%ebx), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ movl 68(%esp), %ebx # 4-byte Reload
+ js .LBB163_2
+# BB#1:
+ movl %edx, %ebx
+.LBB163_2:
+ movl 1152(%esp), %edx
+ movl %ebx, (%edx)
+ movl 60(%esp), %ebp # 4-byte Reload
+ js .LBB163_4
+# BB#3:
+ movl %esi, %ebp
+.LBB163_4:
+ movl %ebp, 4(%edx)
+ js .LBB163_6
+# BB#5:
+ movl %ecx, %edi
+.LBB163_6:
+ movl %edi, 8(%edx)
+ movl 44(%esp), %ecx # 4-byte Reload
+ js .LBB163_8
+# BB#7:
+ movl %eax, %ecx
+.LBB163_8:
+ movl %ecx, 12(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB163_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB163_10:
+ movl %eax, 16(%edx)
+ movl 28(%esp), %eax # 4-byte Reload
+ js .LBB163_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB163_12:
+ movl %eax, 20(%edx)
+ movl 32(%esp), %eax # 4-byte Reload
+ js .LBB163_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB163_14:
+ movl %eax, 24(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB163_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB163_16:
+ movl %eax, 28(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB163_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB163_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB163_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB163_20:
+ movl %eax, 36(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB163_22
+# BB#21:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB163_22:
+ movl %eax, 40(%edx)
+ addl $1132, %esp # imm = 0x46C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end163:
+ .size mcl_fp_montNF11Lbmi2, .Lfunc_end163-mcl_fp_montNF11Lbmi2
+
+ .globl mcl_fp_montRed11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed11Lbmi2,@function
+mcl_fp_montRed11Lbmi2: # @mcl_fp_montRed11Lbmi2
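+# Montgomery reduction for an 11-limb (352-bit) modulus: each round multiplies the
+# current low word by the constant loaded from -4(modulus) (the Montgomery factor
+# -p^-1 mod 2^32), folds q*p in via .LmulPv352x32, and a final conditional
+# subtraction of p brings the result back into range.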
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $668, %esp # imm = 0x29C
+ calll .L164$pb
+.L164$pb:
+ popl %eax
+.Ltmp25:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp25-.L164$pb), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 696(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 692(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 4(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 84(%ecx), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 80(%ecx), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 48(%ecx), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 40(%ecx), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 32(%ecx), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 28(%ecx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 24(%ecx), %ebp
+ movl 20(%ecx), %edi
+ movl 16(%ecx), %esi
+ movl 12(%ecx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 8(%ecx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 616(%esp), %ecx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 616(%esp), %eax
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 620(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 632(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 636(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 640(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 568(%esp), %esi
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 572(%esp), %edx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 600(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 520(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 520(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 524(%esp), %ecx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 548(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 120(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 472(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 424(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 428(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 464(%esp), %ebp
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 376(%esp), %esi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 380(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %esi # 4-byte Reload
+ adcl 404(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 412(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 328(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 344(%esp), %edi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 352(%esp), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 356(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 696(%esp), %eax
+ movl %eax, %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 280(%esp), %ebp
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 284(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 296(%esp), %edi
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl 304(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 232(%esp), %ebp
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 236(%esp), %ebp
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 244(%esp), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 276(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 184(%esp), %ebp
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 188(%esp), %ecx
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 136(%esp), %esi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 128(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 148(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 152(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ adcl 180(%esp), %ebx
+ movl %ebx, 80(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ subl 12(%esp), %edi # 4-byte Folded Reload
+ sbbl 4(%esp), %edx # 4-byte Folded Reload
+ sbbl 8(%esp), %ecx # 4-byte Folded Reload
+ sbbl 16(%esp), %esi # 4-byte Folded Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ sbbl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ sbbl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl %ebp, %ebx
+ sbbl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB164_2
+# BB#1:
+ movl %esi, 112(%esp) # 4-byte Spill
+.LBB164_2:
+ testb %bl, %bl
+ movl 132(%esp), %esi # 4-byte Reload
+ jne .LBB164_4
+# BB#3:
+ movl %edi, %esi
+.LBB164_4:
+ movl 688(%esp), %edi
+ movl %esi, (%edi)
+ movl 104(%esp), %esi # 4-byte Reload
+ jne .LBB164_6
+# BB#5:
+ movl %edx, 128(%esp) # 4-byte Spill
+.LBB164_6:
+ movl 128(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%edi)
+ movl 116(%esp), %edx # 4-byte Reload
+ jne .LBB164_8
+# BB#7:
+ movl %ecx, %edx
+.LBB164_8:
+ movl %edx, 8(%edi)
+ movl 112(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%edi)
+ movl 92(%esp), %edx # 4-byte Reload
+ movl 124(%esp), %ecx # 4-byte Reload
+ jne .LBB164_10
+# BB#9:
+ movl 64(%esp), %ecx # 4-byte Reload
+.LBB164_10:
+ movl %ecx, 16(%edi)
+ movl 96(%esp), %ecx # 4-byte Reload
+ movl 120(%esp), %eax # 4-byte Reload
+ jne .LBB164_12
+# BB#11:
+ movl 68(%esp), %eax # 4-byte Reload
+.LBB164_12:
+ movl %eax, 20(%edi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl 108(%esp), %ebp # 4-byte Reload
+ jne .LBB164_14
+# BB#13:
+ movl 72(%esp), %ebp # 4-byte Reload
+.LBB164_14:
+ movl %ebp, 24(%edi)
+ jne .LBB164_16
+# BB#15:
+ movl 76(%esp), %esi # 4-byte Reload
+.LBB164_16:
+ movl %esi, 28(%edi)
+ jne .LBB164_18
+# BB#17:
+ movl 84(%esp), %edx # 4-byte Reload
+.LBB164_18:
+ movl %edx, 32(%edi)
+ jne .LBB164_20
+# BB#19:
+ movl 88(%esp), %ecx # 4-byte Reload
+.LBB164_20:
+ movl %ecx, 36(%edi)
+ jne .LBB164_22
+# BB#21:
+ movl 100(%esp), %eax # 4-byte Reload
+.LBB164_22:
+ movl %eax, 40(%edi)
+ addl $668, %esp # imm = 0x29C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end164:
+ .size mcl_fp_montRed11Lbmi2, .Lfunc_end164-mcl_fp_montRed11Lbmi2
+
+ .globl mcl_fp_addPre11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre11Lbmi2,@function
+mcl_fp_addPre11Lbmi2: # @mcl_fp_addPre11Lbmi2
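+# Plain 11-limb addition (no modular reduction): adds the two inputs limb by limb
+# and returns the final carry in %eax.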
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl %esi, 32(%ebx)
+ movl %edx, 36(%ebx)
+ movl 40(%eax), %eax
+ movl 40(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 40(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end165:
+ .size mcl_fp_addPre11Lbmi2, .Lfunc_end165-mcl_fp_addPre11Lbmi2
+
+ .globl mcl_fp_subPre11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre11Lbmi2,@function
+mcl_fp_subPre11Lbmi2: # @mcl_fp_subPre11Lbmi2
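+# Plain 11-limb subtraction (no modular reduction): subtracts limb by limb and
+# returns the final borrow in %eax.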
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edi, 32(%ebp)
+ movl %esi, 36(%ebp)
+ movl 40(%edx), %edx
+ movl 40(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 40(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end166:
+ .size mcl_fp_subPre11Lbmi2, .Lfunc_end166-mcl_fp_subPre11Lbmi2
+
+ .globl mcl_fp_shr1_11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_11Lbmi2,@function
+mcl_fp_shr1_11Lbmi2: # @mcl_fp_shr1_11Lbmi2
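+# Right shift of an 11-limb value by one bit: a chain of shrdl instructions moves
+# each limb's low bit into the top bit of the limb below, then shrl finishes the
+# most significant limb.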
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 24(%esi)
+ movl 32(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 28(%esi)
+ movl 36(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 32(%esi)
+ movl 40(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 36(%esi)
+ shrl %eax
+ movl %eax, 40(%esi)
+ popl %esi
+ retl
+.Lfunc_end167:
+ .size mcl_fp_shr1_11Lbmi2, .Lfunc_end167-mcl_fp_shr1_11Lbmi2
+
+ .globl mcl_fp_add11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add11Lbmi2,@function
+mcl_fp_add11Lbmi2: # @mcl_fp_add11Lbmi2
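+# Modular addition: stores the raw sum a+b, then computes (a+b)-p; if that
+# subtraction does not borrow, the reduced values overwrite the raw sum.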
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 60(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 56(%esp), %esi
+ addl (%esi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl 4(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl 16(%esi), %ecx
+ adcl 12(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl 16(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ adcl 20(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ adcl 24(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 28(%esi), %ebx
+ adcl 28(%edi), %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl 32(%esi), %ecx
+ adcl 32(%edi), %ecx
+ movl 36(%esi), %eax
+ adcl 36(%edi), %eax
+ movl 40(%esi), %edx
+ adcl 40(%edi), %edx
+ movl 52(%esp), %esi
+ movl %ebp, (%esi)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%esi)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%esi)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%esi)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%esi)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%esi)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%esi)
+ movl %ebx, 28(%esi)
+ movl %ecx, 32(%esi)
+ movl %eax, 36(%esi)
+ movl %edx, 40(%esi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 64(%esp), %ebp
+ movl 4(%esp), %edi # 4-byte Reload
+ subl (%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl (%esp), %edi # 4-byte Reload
+ sbbl 28(%ebp), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 32(%ebp), %ecx
+ sbbl 36(%ebp), %eax
+ sbbl 40(%ebp), %edx
+ movl %edx, %edi
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB168_2
+# BB#1: # %nocarry
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, (%esi)
+ movl 28(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%esi)
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%esi)
+ movl 20(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%esi)
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%esi)
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 20(%esi)
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%esi)
+ movl (%esp), %edx # 4-byte Reload
+ movl %edx, 28(%esi)
+ movl %ecx, 32(%esi)
+ movl %eax, 36(%esi)
+ movl %edi, 40(%esi)
+.LBB168_2: # %carry
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end168:
+ .size mcl_fp_add11Lbmi2, .Lfunc_end168-mcl_fp_add11Lbmi2
+
+ .globl mcl_fp_addNF11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF11Lbmi2,@function
+mcl_fp_addNF11Lbmi2: # @mcl_fp_addNF11Lbmi2
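+# Modular addition, NF variant: computes both a+b and a+b-p, then uses the sign of
+# the final borrow word (sarl $31) to select which of the two results is stored.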
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $80, %esp
+ movl 108(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %ecx
+ movl 104(%esp), %esi
+ addl (%esi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 4(%esi), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%edx), %ebx
+ movl 36(%edx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 20(%edx), %ebp
+ movl 16(%edx), %edi
+ movl 12(%edx), %eax
+ movl 8(%edx), %ecx
+ adcl 8(%esi), %ecx
+ adcl 12(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 24(%esi), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 28(%esi), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 32(%esi), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 36(%esi), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ adcl 40(%esi), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 112(%esp), %ebx
+ movl 52(%esp), %esi # 4-byte Reload
+ subl (%ebx), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebx), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl %edx, %ecx
+ sbbl 8(%ebx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ sbbl 12(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ sbbl 20(%ebx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 24(%ebx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ movl %edi, %ecx
+ movl %edi, %ebp
+ sbbl 36(%ebx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edi
+ sbbl 40(%ebx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ movl 52(%esp), %edi # 4-byte Reload
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ js .LBB169_2
+# BB#1:
+ movl %esi, %edi
+.LBB169_2:
+ movl 100(%esp), %esi
+ movl %edi, (%esi)
+ movl 60(%esp), %edi # 4-byte Reload
+ js .LBB169_4
+# BB#3:
+ movl (%esp), %edi # 4-byte Reload
+.LBB169_4:
+ movl %edi, 4(%esi)
+ movl %eax, %edi
+ js .LBB169_6
+# BB#5:
+ movl 4(%esp), %edx # 4-byte Reload
+.LBB169_6:
+ movl %edx, 8(%esi)
+ movl %ebp, %ecx
+ movl 72(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB169_8
+# BB#7:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB169_8:
+ movl %eax, 12(%esi)
+ movl 76(%esp), %eax # 4-byte Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ js .LBB169_10
+# BB#9:
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+.LBB169_10:
+ movl 48(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%esi)
+ js .LBB169_12
+# BB#11:
+ movl 16(%esp), %ebp # 4-byte Reload
+.LBB169_12:
+ movl %ebp, 20(%esi)
+ js .LBB169_14
+# BB#13:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB169_14:
+ movl %edi, 24(%esi)
+ js .LBB169_16
+# BB#15:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB169_16:
+ movl %eax, 28(%esi)
+ js .LBB169_18
+# BB#17:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB169_18:
+ movl %edx, 32(%esi)
+ js .LBB169_20
+# BB#19:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB169_20:
+ movl %ecx, 36(%esi)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB169_22
+# BB#21:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB169_22:
+ movl %eax, 40(%esi)
+ addl $80, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end169:
+ .size mcl_fp_addNF11Lbmi2, .Lfunc_end169-mcl_fp_addNF11Lbmi2
+
+ .globl mcl_fp_sub11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub11Lbmi2,@function
+mcl_fp_sub11Lbmi2: # @mcl_fp_sub11Lbmi2
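+# Modular subtraction: stores the raw difference a-b; if the subtraction borrowed,
+# the carry path adds the modulus back before the final store.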
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %ebp
+ movl (%ebp), %ecx
+ movl 4(%ebp), %eax
+ movl 68(%esp), %edi
+ subl (%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 8(%ebp), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%ebp), %ebx
+ sbbl 12(%edi), %ebx
+ movl 16(%ebp), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 20(%ebp), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 24(%ebp), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 28(%ebp), %edx
+ sbbl 28(%edi), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 32(%ebp), %ecx
+ sbbl 32(%edi), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 36(%ebp), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 40(%ebp), %eax
+ sbbl 40(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ movl 16(%esp), %esi # 4-byte Reload
+ movl $0, %ebx
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 60(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl %ebp, 12(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl %edx, 28(%ebx)
+ movl %ecx, 32(%ebx)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%ebx)
+ movl %ecx, %edi
+ movl %eax, 40(%ebx)
+ je .LBB170_2
+# BB#1: # %carry
+ movl 72(%esp), %eax
+ addl (%eax), %esi
+ movl %esi, (%ebx)
+ movl 28(%esp), %edx # 4-byte Reload
+ movl %eax, %esi
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl %ebp, %eax
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl %ecx, 32(%ebx)
+ movl 36(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 36(%ebx)
+ movl 40(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ebx)
+.LBB170_2: # %nocarry
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end170:
+ .size mcl_fp_sub11Lbmi2, .Lfunc_end170-mcl_fp_sub11Lbmi2
+
+ .globl mcl_fp_subNF11Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF11Lbmi2,@function
+mcl_fp_subNF11Lbmi2: # @mcl_fp_subNF11Lbmi2
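+# Modular subtraction, NF variant: computes a-b, broadcasts the borrow sign into a
+# mask (sarl $31), and adds the masked modulus so the result lands back in [0, p).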
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+ movl 88(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 92(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%eax), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 36(%eax), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 28(%eax), %ebx
+ movl 24(%eax), %ebp
+ movl 20(%eax), %esi
+ movl 16(%eax), %edx
+ movl 12(%eax), %ecx
+ movl 8(%eax), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 12(%edi), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ sarl $31, %ecx
+ movl %ecx, %edx
+ shldl $1, %eax, %edx
+ movl 96(%esp), %ebx
+ movl 4(%ebx), %eax
+ andl %edx, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ andl (%ebx), %edx
+ movl 40(%ebx), %eax
+ andl %ecx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 36(%ebx), %eax
+ andl %ecx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 32(%ebx), %eax
+ andl %ecx, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 28(%ebx), %eax
+ andl %ecx, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 24(%ebx), %ebp
+ andl %ecx, %ebp
+ rorxl $31, %ecx, %eax
+ andl 20(%ebx), %ecx
+ movl 16(%ebx), %edi
+ andl %eax, %edi
+ movl 12(%ebx), %esi
+ andl %eax, %esi
+ andl 8(%ebx), %eax
+ addl 40(%esp), %edx # 4-byte Folded Reload
+ movl 48(%esp), %ebx # 4-byte Reload
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx
+ movl %edx, (%ebx)
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%ebx)
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 8(%ebx)
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 12(%ebx)
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 16(%ebx)
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %ecx, 20(%ebx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 24(%ebx)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl %eax, 36(%ebx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ebx)
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end171:
+ .size mcl_fp_subNF11Lbmi2, .Lfunc_end171-mcl_fp_subNF11Lbmi2
+
+ .globl mcl_fpDbl_add11Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add11Lbmi2,@function
+mcl_fpDbl_add11Lbmi2: # @mcl_fpDbl_add11Lbmi2
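+# Double-width addition: adds two 22-limb values; the low 11 limbs are written
+# directly, and the high 11 limbs are conditionally reduced by subtracting the
+# modulus when no borrow occurs.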
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $80, %esp
+ movl 108(%esp), %ecx
+ movl 104(%esp), %edi
+ movl 12(%edi), %esi
+ movl 16(%edi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%edi), %ebp
+ movl 100(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%ecx), %esi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 52(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%edi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%edi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebx, %esi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%edi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %esi, 36(%eax)
+ movl 44(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 48(%ecx), %esi
+ movl %edx, 40(%eax)
+ movl 48(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 52(%edi), %eax
+ adcl %ebp, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 56(%ecx), %edx
+ movl 56(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%ecx), %edx
+ movl 60(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%ecx), %edx
+ movl 64(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl 68(%edi), %edx
+ adcl %eax, %edx
+ movl 72(%ecx), %esi
+ movl 72(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 76(%ecx), %ebx
+ movl 76(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 80(%ecx), %ebp
+ movl 80(%edi), %ebx
+ adcl %ebp, %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 84(%ecx), %ecx
+ movl 84(%edi), %edi
+ adcl %ecx, %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 112(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ subl (%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %edx, %edi
+ sbbl 24(%ebp), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 28(%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl 32(%ebp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ sbbl 36(%ebp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ sbbl 40(%ebp), %edi
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB172_2
+# BB#1:
+ movl %edi, %ebx
+.LBB172_2:
+ testb %cl, %cl
+ movl 68(%esp), %ecx # 4-byte Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ movl 60(%esp), %edi # 4-byte Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ jne .LBB172_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB172_4:
+ movl 100(%esp), %eax
+ movl %ecx, 44(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl %ebp, 56(%eax)
+ movl %edi, 60(%eax)
+ movl %esi, 64(%eax)
+ movl %edx, 68(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ jne .LBB172_6
+# BB#5:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB172_6:
+ movl %edx, 72(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ jne .LBB172_8
+# BB#7:
+ movl 32(%esp), %edx # 4-byte Reload
+.LBB172_8:
+ movl %edx, 76(%eax)
+ jne .LBB172_10
+# BB#9:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB172_10:
+ movl %ecx, 80(%eax)
+ movl %ebx, 84(%eax)
+ addl $80, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end172:
+ .size mcl_fpDbl_add11Lbmi2, .Lfunc_end172-mcl_fpDbl_add11Lbmi2
+
+ .globl mcl_fpDbl_sub11Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub11Lbmi2,@function
+mcl_fpDbl_sub11Lbmi2: # @mcl_fpDbl_sub11Lbmi2
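+# Double-width subtraction: subtracts two 22-limb values; if the high half borrows,
+# the modulus (selected limb by limb via the borrow flag) is added back to the high
+# 11 limbs.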
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 96(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %esi
+ movl 100(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %esi
+ movl 8(%edx), %edi
+ sbbl 8(%ebp), %edi
+ movl 92(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edx), %eax
+ sbbl 12(%ebp), %eax
+ movl %esi, 4(%ecx)
+ movl 16(%edx), %esi
+ sbbl 16(%ebp), %esi
+ movl %edi, 8(%ecx)
+ movl 20(%ebp), %edi
+ movl %eax, 12(%ecx)
+ movl 20(%edx), %eax
+ sbbl %edi, %eax
+ movl 24(%ebp), %edi
+ movl %esi, 16(%ecx)
+ movl 24(%edx), %esi
+ sbbl %edi, %esi
+ movl 28(%ebp), %edi
+ movl %eax, 20(%ecx)
+ movl 28(%edx), %eax
+ sbbl %edi, %eax
+ movl 32(%ebp), %edi
+ movl %esi, 24(%ecx)
+ movl 32(%edx), %esi
+ sbbl %edi, %esi
+ movl 36(%ebp), %edi
+ movl %eax, 28(%ecx)
+ movl 36(%edx), %eax
+ sbbl %edi, %eax
+ movl 40(%ebp), %edi
+ movl %esi, 32(%ecx)
+ movl 40(%edx), %esi
+ sbbl %edi, %esi
+ movl 44(%ebp), %edi
+ movl %eax, 36(%ecx)
+ movl 44(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%ebp), %eax
+ movl %esi, 40(%ecx)
+ movl 48(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 52(%ebp), %eax
+ movl 52(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 56(%ebp), %eax
+ movl 56(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 60(%ebp), %eax
+ movl 60(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 64(%ebp), %eax
+ movl 64(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 68(%ebp), %eax
+ movl 68(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 72(%ebp), %eax
+ movl 72(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 76(%ebp), %eax
+ movl 76(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 80(%ebp), %eax
+ movl 80(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 84(%ebp), %eax
+ movl 84(%edx), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 104(%esp), %ebp
+ jne .LBB173_1
+# BB#2:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ jmp .LBB173_3
+.LBB173_1:
+ movl 40(%ebp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+.LBB173_3:
+ testb %al, %al
+ jne .LBB173_4
+# BB#5:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB173_6
+.LBB173_4:
+ movl (%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 4(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB173_6:
+ jne .LBB173_7
+# BB#8:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB173_9
+.LBB173_7:
+ movl 36(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB173_9:
+ jne .LBB173_10
+# BB#11:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB173_12
+.LBB173_10:
+ movl 32(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB173_12:
+ jne .LBB173_13
+# BB#14:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB173_15
+.LBB173_13:
+ movl 28(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB173_15:
+ jne .LBB173_16
+# BB#17:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB173_18
+.LBB173_16:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB173_18:
+ jne .LBB173_19
+# BB#20:
+ movl $0, %edx
+ jmp .LBB173_21
+.LBB173_19:
+ movl 20(%ebp), %edx
+.LBB173_21:
+ jne .LBB173_22
+# BB#23:
+ movl $0, %edi
+ jmp .LBB173_24
+.LBB173_22:
+ movl 16(%ebp), %edi
+.LBB173_24:
+ jne .LBB173_25
+# BB#26:
+ movl $0, %ebx
+ jmp .LBB173_27
+.LBB173_25:
+ movl 12(%ebp), %ebx
+.LBB173_27:
+ jne .LBB173_28
+# BB#29:
+ xorl %ebp, %ebp
+ jmp .LBB173_30
+.LBB173_28:
+ movl 8(%ebp), %ebp
+.LBB173_30:
+ movl 8(%esp), %esi # 4-byte Reload
+ addl 36(%esp), %esi # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 44(%ecx)
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 52(%ecx)
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 56(%ecx)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 60(%ecx)
+ movl (%esp), %esi # 4-byte Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 64(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 68(%ecx)
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 76(%ecx)
+ movl %eax, 80(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%ecx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end173:
+ .size mcl_fpDbl_sub11Lbmi2, .Lfunc_end173-mcl_fpDbl_sub11Lbmi2
+
+ .align 16, 0x90
+ .type .LmulPv384x32,@function
+.LmulPv384x32: # @mulPv384x32
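+# Multiplies a 12-limb (384-bit) operand by a single 32-bit word using BMI2 mulx,
+# producing a 13-limb result; shared helper for the 12-limb routines that follow.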
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl %edx, %eax
+ movl 56(%esp), %edx
+ mulxl 4(%eax), %ebx, %edi
+ mulxl (%eax), %esi, %ebp
+ movl %esi, 32(%esp) # 4-byte Spill
+ addl %ebx, %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ mulxl 8(%eax), %ebx, %esi
+ adcl %edi, %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ mulxl 12(%eax), %edi, %ebx
+ adcl %esi, %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ mulxl 16(%eax), %esi, %edi
+ adcl %ebx, %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ mulxl 20(%eax), %esi, %ebx
+ adcl %edi, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ mulxl 24(%eax), %esi, %edi
+ adcl %ebx, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ mulxl 28(%eax), %ebx, %esi
+ adcl %edi, %ebx
+ mulxl 32(%eax), %edi, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ mulxl 36(%eax), %esi, %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ mulxl 40(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 32(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 28(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%ecx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%ecx)
+ movl %ebx, 28(%ecx)
+ movl %edi, 32(%ecx)
+ movl %esi, 36(%ecx)
+ movl %edx, 40(%ecx)
+ movl 56(%esp), %edx
+ mulxl 44(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl $0, %edx
+ movl %edx, 48(%ecx)
+ movl %ecx, %eax
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end174:
+ .size .LmulPv384x32, .Lfunc_end174-.LmulPv384x32
+
+ .globl mcl_fp_mulUnitPre12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre12Lbmi2,@function
+mcl_fp_mulUnitPre12Lbmi2: # @mcl_fp_mulUnitPre12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+ calll .L175$pb
+.L175$pb:
+ popl %ebx
+.Ltmp26:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp26-.L175$pb), %ebx
+ movl 120(%esp), %eax
+ movl %eax, (%esp)
+ leal 40(%esp), %ecx
+ movl 116(%esp), %edx
+ calll .LmulPv384x32
+ movl 88(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp
+ movl 56(%esp), %ebx
+ movl 52(%esp), %edi
+ movl 48(%esp), %esi
+ movl 40(%esp), %edx
+ movl 44(%esp), %ecx
+ movl 112(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end175:
+ .size mcl_fp_mulUnitPre12Lbmi2, .Lfunc_end175-mcl_fp_mulUnitPre12Lbmi2
+
+ .globl mcl_fpDbl_mulPre12Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre12Lbmi2,@function
+mcl_fpDbl_mulPre12Lbmi2: # @mcl_fpDbl_mulPre12Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $220, %esp
+ calll .L176$pb
+.L176$pb:
+ popl %ebx
+.Ltmp27:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp27-.L176$pb), %ebx
+ movl %ebx, -164(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl %esi, 8(%esp)
+ movl 12(%ebp), %edi
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6Lbmi2@PLT
+ leal 24(%esi), %eax
+ movl %eax, 8(%esp)
+ leal 24(%edi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 48(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6Lbmi2@PLT
+ movl 40(%edi), %ebx
+ movl 36(%edi), %eax
+ movl 32(%edi), %edx
+ movl (%edi), %esi
+ movl 4(%edi), %ecx
+ addl 24(%edi), %esi
+ adcl 28(%edi), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ adcl 8(%edi), %edx
+ movl %edx, -188(%ebp) # 4-byte Spill
+ adcl 12(%edi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ adcl 16(%edi), %ebx
+ movl %ebx, -180(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ movl 16(%ebp), %edi
+ movl (%edi), %eax
+ addl 24(%edi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ movl 4(%edi), %eax
+ adcl 28(%edi), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ movl 32(%edi), %eax
+ adcl 8(%edi), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ movl 36(%edi), %eax
+ adcl 12(%edi), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl 40(%edi), %ecx
+ adcl 16(%edi), %ecx
+ movl 44(%edi), %eax
+ adcl 20(%edi), %eax
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -184(%ebp) # 4-byte Spill
+ movl %ebx, %edi
+ movl %edx, -156(%ebp) # 4-byte Spill
+ movl %esi, -160(%ebp) # 4-byte Spill
+ movl %esi, %edx
+ jb .LBB176_2
+# BB#1:
+ xorl %edi, %edi
+ movl $0, -156(%ebp) # 4-byte Folded Spill
+ movl $0, -160(%ebp) # 4-byte Folded Spill
+.LBB176_2:
+ movl %edi, -176(%ebp) # 4-byte Spill
+ movl 12(%ebp), %esi
+ movl 44(%esi), %edi
+ movl -112(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 20(%esi), %edi
+ movl %edi, -132(%ebp) # 4-byte Spill
+ movl %eax, -124(%ebp) # 4-byte Spill
+ movl %ecx, -112(%ebp) # 4-byte Spill
+ movl -148(%ebp), %esi # 4-byte Reload
+ movl %esi, -116(%ebp) # 4-byte Spill
+ movl -144(%ebp), %esi # 4-byte Reload
+ movl %esi, -120(%ebp) # 4-byte Spill
+ movl -140(%ebp), %esi # 4-byte Reload
+ movl %esi, -128(%ebp) # 4-byte Spill
+ movl -136(%ebp), %esi # 4-byte Reload
+ movl %esi, -152(%ebp) # 4-byte Spill
+ jb .LBB176_4
+# BB#3:
+ movl $0, -124(%ebp) # 4-byte Folded Spill
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+ movl $0, -116(%ebp) # 4-byte Folded Spill
+ movl $0, -120(%ebp) # 4-byte Folded Spill
+ movl $0, -128(%ebp) # 4-byte Folded Spill
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+.LBB176_4:
+ movl %edx, -84(%ebp)
+ movl -172(%ebp), %esi # 4-byte Reload
+ movl %esi, -80(%ebp)
+ movl -188(%ebp), %edx # 4-byte Reload
+ movl %edx, -76(%ebp)
+ movl -168(%ebp), %edi # 4-byte Reload
+ movl %edi, -72(%ebp)
+ movl -180(%ebp), %edx # 4-byte Reload
+ movl %edx, -68(%ebp)
+ movl -136(%ebp), %edx # 4-byte Reload
+ movl %edx, -108(%ebp)
+ movl -140(%ebp), %edx # 4-byte Reload
+ movl %edx, -104(%ebp)
+ movl -144(%ebp), %edx # 4-byte Reload
+ movl %edx, -100(%ebp)
+ movl -148(%ebp), %edx # 4-byte Reload
+ movl %edx, -96(%ebp)
+ movl %ecx, -92(%ebp)
+ movl %eax, -88(%ebp)
+ movl %edi, %ebx
+ sbbl %edx, %edx
+ movl -132(%ebp), %eax # 4-byte Reload
+ movl %eax, -64(%ebp)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB176_6
+# BB#5:
+ movl $0, %eax
+ movl $0, %ebx
+ movl $0, %esi
+.LBB176_6:
+ movl %eax, -132(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+ leal -108(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -84(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -60(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl -152(%ebp), %edi # 4-byte Reload
+ addl -160(%ebp), %edi # 4-byte Folded Reload
+ adcl %esi, -128(%ebp) # 4-byte Folded Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl %eax, -120(%ebp) # 4-byte Folded Spill
+ adcl %ebx, -116(%ebp) # 4-byte Folded Spill
+ movl -176(%ebp), %eax # 4-byte Reload
+ adcl %eax, -112(%ebp) # 4-byte Folded Spill
+ movl -132(%ebp), %eax # 4-byte Reload
+ adcl %eax, -124(%ebp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -132(%ebp) # 4-byte Spill
+ movl -164(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre6Lbmi2@PLT
+ addl -36(%ebp), %edi
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -32(%ebp), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -112(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+ adcl %esi, -132(%ebp) # 4-byte Folded Spill
+ movl -60(%ebp), %ecx
+ movl 8(%ebp), %eax
+ subl (%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -56(%ebp), %esi
+ sbbl 4(%eax), %esi
+ movl -52(%ebp), %ecx
+ sbbl 8(%eax), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ movl -48(%ebp), %edx
+ sbbl 12(%eax), %edx
+ movl -44(%ebp), %ebx
+ sbbl 16(%eax), %ebx
+ movl -40(%ebp), %ecx
+ sbbl 20(%eax), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 28(%eax), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 32(%eax), %ecx
+ movl %ecx, -156(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 36(%eax), %ecx
+ movl %ecx, -160(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 40(%eax), %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 44(%eax), %ecx
+ movl %ecx, -168(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, -132(%ebp) # 4-byte Folded Spill
+ movl 48(%eax), %ecx
+ movl %ecx, -192(%ebp) # 4-byte Spill
+ subl %ecx, -144(%ebp) # 4-byte Folded Spill
+ movl 52(%eax), %ecx
+ movl %ecx, -196(%ebp) # 4-byte Spill
+ sbbl %ecx, %esi
+ movl 56(%eax), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ sbbl %ecx, -136(%ebp) # 4-byte Folded Spill
+ movl 60(%eax), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 64(%eax), %ecx
+ movl %ecx, -208(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 68(%eax), %ecx
+ movl %ecx, -212(%ebp) # 4-byte Spill
+ sbbl %ecx, -140(%ebp) # 4-byte Folded Spill
+ movl 72(%eax), %ecx
+ movl %ecx, -216(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 76(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 80(%eax), %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 84(%eax), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 88(%eax), %ecx
+ movl %ecx, -184(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 92(%eax), %ecx
+ movl %ecx, -188(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, -132(%ebp) # 4-byte Folded Spill
+ movl -144(%ebp), %ecx # 4-byte Reload
+ addl -148(%ebp), %ecx # 4-byte Folded Reload
+ adcl -152(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 24(%eax)
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -156(%ebp), %ecx # 4-byte Folded Reload
+ movl %esi, 28(%eax)
+ adcl -160(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 32(%eax)
+ adcl -164(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 36(%eax)
+ movl -140(%ebp), %ecx # 4-byte Reload
+ adcl -168(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 40(%eax)
+ adcl -192(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 44(%eax)
+ movl -128(%ebp), %ecx # 4-byte Reload
+ adcl -196(%ebp), %ecx # 4-byte Folded Reload
+ movl %edi, 48(%eax)
+ movl -120(%ebp), %edx # 4-byte Reload
+ adcl -200(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ movl -116(%ebp), %ecx # 4-byte Reload
+ adcl -204(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ movl -112(%ebp), %edx # 4-byte Reload
+ adcl -208(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ movl -124(%ebp), %ecx # 4-byte Reload
+ adcl -212(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ movl -132(%ebp), %edx # 4-byte Reload
+ adcl -216(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl %edx, 72(%eax)
+ movl -172(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 76(%eax)
+ movl -176(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 80(%eax)
+ movl -180(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 88(%eax)
+ movl -188(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 92(%eax)
+ addl $220, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end176:
+ .size mcl_fpDbl_mulPre12Lbmi2, .Lfunc_end176-mcl_fpDbl_mulPre12Lbmi2
+
+ .globl mcl_fpDbl_sqrPre12Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre12Lbmi2,@function
+mcl_fpDbl_sqrPre12Lbmi2: # @mcl_fpDbl_sqrPre12Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $220, %esp
+ calll .L177$pb
+.L177$pb:
+ popl %ebx
+.Ltmp28:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp28-.L177$pb), %ebx
+ movl %ebx, -152(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre6Lbmi2@PLT
+ leal 24(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 48(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6Lbmi2@PLT
+ movl 44(%edi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ movl 40(%edi), %edx
+ movl 36(%edi), %eax
+ movl (%edi), %ebx
+ movl 4(%edi), %esi
+ addl 24(%edi), %ebx
+ adcl 28(%edi), %esi
+ movl 32(%edi), %ecx
+ adcl 8(%edi), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ adcl 12(%edi), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ adcl 16(%edi), %edx
+ movl %edx, %ecx
+ movl -136(%ebp), %eax # 4-byte Reload
+ adcl 20(%edi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edx
+ movl %edx, -156(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edx
+ popl %eax
+ movl %edx, -124(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edx
+ sbbl %edi, %edi
+ movl %edi, -148(%ebp) # 4-byte Spill
+ movl %ebx, %edi
+ addl %edi, %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+ movl %esi, %edi
+ movl %esi, %eax
+ adcl %edi, %edi
+ movl %edi, -132(%ebp) # 4-byte Spill
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB177_2
+# BB#1:
+ movl $0, -132(%ebp) # 4-byte Folded Spill
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+.LBB177_2:
+ movl -144(%ebp), %esi # 4-byte Reload
+ addl %esi, %esi
+ movl -140(%ebp), %edx # 4-byte Reload
+ adcl %edx, %edx
+ movl %edx, -116(%ebp) # 4-byte Spill
+ movl -120(%ebp), %edx # 4-byte Reload
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB177_3
+# BB#4:
+ movl $0, -116(%ebp) # 4-byte Folded Spill
+ movl $0, -120(%ebp) # 4-byte Folded Spill
+ jmp .LBB177_5
+.LBB177_3:
+ movl %eax, %edx
+ shrl $31, %edx
+ orl %esi, %edx
+ movl %edx, -120(%ebp) # 4-byte Spill
+.LBB177_5:
+ movl -136(%ebp), %edx # 4-byte Reload
+ movl %ecx, %esi
+ addl %esi, %esi
+ adcl %edx, %edx
+ movl -124(%ebp), %edi # 4-byte Reload
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB177_6
+# BB#7:
+ xorl %edx, %edx
+ movl $0, -128(%ebp) # 4-byte Folded Spill
+ movl -140(%ebp), %edi # 4-byte Reload
+ jmp .LBB177_8
+.LBB177_6:
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ movl -140(%ebp), %edi # 4-byte Reload
+ movl %edi, %ecx
+ shrl $31, %ecx
+ orl %esi, %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ movl -124(%ebp), %ecx # 4-byte Reload
+.LBB177_8:
+ movl %edx, -124(%ebp) # 4-byte Spill
+ movl %ebx, -84(%ebp)
+ movl %eax, -80(%ebp)
+ movl -144(%ebp), %esi # 4-byte Reload
+ movl %esi, -76(%ebp)
+ movl %edi, -72(%ebp)
+ movl %ecx, -68(%ebp)
+ movl -136(%ebp), %edx # 4-byte Reload
+ movl %edx, -64(%ebp)
+ movl %ebx, -108(%ebp)
+ movl %eax, -104(%ebp)
+ movl %esi, -100(%ebp)
+ movl %edi, -96(%ebp)
+ movl %ecx, -92(%ebp)
+ movl %edx, -88(%ebp)
+ movl -156(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB177_9
+# BB#10:
+ movl $0, -136(%ebp) # 4-byte Folded Spill
+ jmp .LBB177_11
+.LBB177_9:
+ shrl $31, %edx
+ movl %edx, -136(%ebp) # 4-byte Spill
+.LBB177_11:
+ leal -108(%ebp), %eax
+ movl %eax, 8(%esp)
+ leal -84(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -60(%ebp), %eax
+ movl %eax, (%esp)
+ movl -148(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -152(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre6Lbmi2@PLT
+ movl -112(%ebp), %eax # 4-byte Reload
+ addl -36(%ebp), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ movl -132(%ebp), %edi # 4-byte Reload
+ adcl -32(%ebp), %edi
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+ adcl -136(%ebp), %esi # 4-byte Folded Reload
+ movl -60(%ebp), %edx
+ movl 8(%ebp), %eax
+ subl (%eax), %edx
+ movl -56(%ebp), %ebx
+ sbbl 4(%eax), %ebx
+ movl -52(%ebp), %ecx
+ sbbl 8(%eax), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ movl -48(%ebp), %ecx
+ sbbl 12(%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -44(%ebp), %ecx
+ sbbl 16(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ecx
+ sbbl 20(%eax), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 28(%eax), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl %edi, -132(%ebp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ movl %ecx, -156(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 36(%eax), %ecx
+ movl %ecx, -160(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 40(%eax), %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 44(%eax), %ecx
+ movl %ecx, -168(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ movl 48(%eax), %ecx
+ movl %ecx, -192(%ebp) # 4-byte Spill
+ subl %ecx, %edx
+ movl 52(%eax), %ecx
+ movl %ecx, -196(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 56(%eax), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ sbbl %ecx, -136(%ebp) # 4-byte Folded Spill
+ movl 60(%eax), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ sbbl %ecx, -144(%ebp) # 4-byte Folded Spill
+ movl 64(%eax), %ecx
+ movl %ecx, -208(%ebp) # 4-byte Spill
+ movl -172(%ebp), %edi # 4-byte Reload
+ sbbl %ecx, %edi
+ movl 68(%eax), %ecx
+ movl %ecx, -212(%ebp) # 4-byte Spill
+ sbbl %ecx, -140(%ebp) # 4-byte Folded Spill
+ movl 72(%eax), %ecx
+ movl %ecx, -216(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 76(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ sbbl %ecx, -132(%ebp) # 4-byte Folded Spill
+ movl 80(%eax), %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 84(%eax), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 88(%eax), %ecx
+ movl %ecx, -184(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 92(%eax), %ecx
+ movl %ecx, -188(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ addl -148(%ebp), %edx # 4-byte Folded Reload
+ adcl -152(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 24(%eax)
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -156(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 28(%eax)
+ movl -144(%ebp), %edx # 4-byte Reload
+ adcl -160(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 32(%eax)
+ adcl -164(%ebp), %edi # 4-byte Folded Reload
+ movl %edx, 36(%eax)
+ movl -140(%ebp), %edx # 4-byte Reload
+ adcl -168(%ebp), %edx # 4-byte Folded Reload
+ movl %edi, 40(%eax)
+ movl -112(%ebp), %ecx # 4-byte Reload
+ adcl -192(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 44(%eax)
+ movl -132(%ebp), %edi # 4-byte Reload
+ adcl -196(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 48(%eax)
+ movl -120(%ebp), %edx # 4-byte Reload
+ adcl -200(%ebp), %edx # 4-byte Folded Reload
+ movl %edi, 52(%eax)
+ movl -116(%ebp), %ecx # 4-byte Reload
+ adcl -204(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ movl -128(%ebp), %edx # 4-byte Reload
+ adcl -208(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ movl -124(%ebp), %ecx # 4-byte Reload
+ adcl -212(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ adcl -216(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl %esi, 72(%eax)
+ movl -172(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 76(%eax)
+ movl -176(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 80(%eax)
+ movl -180(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 88(%eax)
+ movl -188(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 92(%eax)
+ addl $220, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end177:
+ .size mcl_fpDbl_sqrPre12Lbmi2, .Lfunc_end177-mcl_fpDbl_sqrPre12Lbmi2
+
+ .globl mcl_fp_mont12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont12Lbmi2,@function
+mcl_fp_mont12Lbmi2: # @mcl_fp_mont12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1436, %esp # imm = 0x59C
+ calll .L178$pb
+.L178$pb:
+ popl %ebx
+.Ltmp29:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp29-.L178$pb), %ebx
+ movl 1468(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1384(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 1384(%esp), %ebp
+ movl 1388(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 1432(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 1428(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1424(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1420(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1416(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1412(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1408(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1404(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1400(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1396(%esp), %edi
+ movl 1392(%esp), %esi
+ movl %eax, (%esp)
+ leal 1328(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ addl 1328(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1336(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 1340(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1372(%esp), %esi
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1376(%esp), %ebp
+ sbbl %edi, %edi
+ movl 1464(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1272(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %edi
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1272(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1312(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1316(%esp), %ebp
+ adcl 1320(%esp), %edi
+ sbbl %eax, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1216(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 84(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1216(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1224(%esp), %esi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1228(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1244(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1248(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1252(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1256(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 1260(%esp), %ebp
+ adcl 1264(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1160(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 1160(%esp), %ecx
+ adcl 1164(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1200(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ adcl 1204(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1104(%esp), %ecx
+ movl 1468(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1104(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1140(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1148(%esp), %edi
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1152(%esp), %ebp
+ adcl $0, %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1048(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 1048(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1080(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1088(%esp), %edi
+ adcl 1092(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %esi
+ movl %esi, %eax
+ addl 992(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 996(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1000(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 1004(%esp), %ebp
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1008(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1012(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1016(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1020(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1024(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1028(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1032(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1036(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1040(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl $0, %edi
+ movl 1464(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 936(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 944(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 948(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 960(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 980(%esp), %esi
+ adcl 984(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %edi
+ movl %edi, %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 880(%esp), %eax
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 892(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 912(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 924(%esp), %esi
+ movl %esi, %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 824(%esp), %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 840(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 864(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 768(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 768(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 780(%esp), %ebp
+ adcl 784(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 800(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 808(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 1460(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ movl 44(%esp), %eax # 4-byte Reload
+ addl 712(%esp), %eax
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 716(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 720(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 724(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 728(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 732(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 736(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 740(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 744(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 748(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 752(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 760(%esp), %edi
+ sbbl %ebp, %ebp
+ movl %eax, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 656(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 656(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 660(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 664(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 672(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 676(%esp), %ebp
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 704(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl $0, %edi
+ movl 1464(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 600(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 600(%esp), %ecx
+ adcl 604(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 616(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 620(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 636(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 648(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 44(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 544(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 548(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 552(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 560(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 564(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 568(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 572(%esp), %edi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 576(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 580(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 584(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 588(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 592(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 488(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 512(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 524(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 532(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 536(%esp), %ebp
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 432(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 440(%esp), %edi
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 444(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 480(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 376(%esp), %ecx
+ adcl 380(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 392(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 416(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 320(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 320(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ adcl 336(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 344(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 360(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 264(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 272(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 284(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 288(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 296(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 208(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 224(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 232(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 236(%esp), %edi
+ adcl 240(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 248(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 152(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 164(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 176(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 188(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 196(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 96(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %esi
+ addl 96(%esp), %edi
+ movl 84(%esp), %ebx # 4-byte Reload
+ movl 92(%esp), %eax # 4-byte Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %edx, %edi
+ adcl 108(%esp), %ebx
+ adcl 112(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 132(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 140(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ adcl $0, %esi
+ movl 1468(%esp), %edx
+ subl (%edx), %eax
+ sbbl 4(%edx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ sbbl 8(%edx), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ sbbl 12(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 16(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 20(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 24(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edx), %ecx
+ movl 44(%esp), %edi # 4-byte Reload
+ sbbl 32(%edx), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 36(%edx), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 40(%edx), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ sbbl 44(%edx), %ebp
+ movl %ebp, %edx
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB178_2
+# BB#1:
+ movl %ecx, 52(%esp) # 4-byte Spill
+.LBB178_2:
+ movl %esi, %ecx
+ testb %cl, %cl
+ movl 92(%esp), %ecx # 4-byte Reload
+ jne .LBB178_4
+# BB#3:
+ movl %eax, %ecx
+.LBB178_4:
+ movl 1456(%esp), %eax
+ movl %ecx, (%eax)
+ movl 68(%esp), %edi # 4-byte Reload
+ jne .LBB178_6
+# BB#5:
+ movl 16(%esp), %edi # 4-byte Reload
+.LBB178_6:
+ movl %edi, 4(%eax)
+ movl 64(%esp), %ebp # 4-byte Reload
+ jne .LBB178_8
+# BB#7:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB178_8:
+ movl %ebx, 8(%eax)
+ jne .LBB178_10
+# BB#9:
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+.LBB178_10:
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ jne .LBB178_12
+# BB#11:
+ movl 28(%esp), %ebp # 4-byte Reload
+.LBB178_12:
+ movl %ebp, 16(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB178_14
+# BB#13:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB178_14:
+ movl %ecx, 20(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB178_16
+# BB#15:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB178_16:
+ movl %ecx, 24(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB178_18
+# BB#17:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB178_18:
+ movl %ecx, 32(%eax)
+ movl 60(%esp), %ecx # 4-byte Reload
+ jne .LBB178_20
+# BB#19:
+ movl 80(%esp), %ecx # 4-byte Reload
+.LBB178_20:
+ movl %ecx, 36(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ jne .LBB178_22
+# BB#21:
+ movl 84(%esp), %ecx # 4-byte Reload
+.LBB178_22:
+ movl %ecx, 40(%eax)
+ movl 88(%esp), %ecx # 4-byte Reload
+ jne .LBB178_24
+# BB#23:
+ movl %edx, %ecx
+.LBB178_24:
+ movl %ecx, 44(%eax)
+ addl $1436, %esp # imm = 0x59C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end178:
+ .size mcl_fp_mont12Lbmi2, .Lfunc_end178-mcl_fp_mont12Lbmi2
+
+ .globl mcl_fp_montNF12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF12Lbmi2,@function
+mcl_fp_montNF12Lbmi2: # @mcl_fp_montNF12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1420, %esp # imm = 0x58C
+ calll .L179$pb
+.L179$pb:
+ popl %ebx
+.Ltmp30:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp30-.L179$pb), %ebx
+ movl 1452(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1368(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1368(%esp), %ebp
+ movl 1372(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 1416(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1412(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1408(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1404(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1400(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1396(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1392(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1388(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1384(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1380(%esp), %edi
+ movl 1376(%esp), %esi
+ movl %eax, (%esp)
+ leal 1312(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 1312(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1320(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 1324(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1344(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1356(%esp), %esi
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1360(%esp), %ebp
+ movl 1448(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1256(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1304(%esp), %eax
+ movl 56(%esp), %edx # 4-byte Reload
+ addl 1256(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1260(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1264(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1268(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1272(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 1276(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1280(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 1284(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1288(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1296(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 1300(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, %edi
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1200(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 1200(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 1208(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1244(%esp), %ebp
+ adcl 1248(%esp), %edi
+ movl 1448(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1192(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1144(%esp), %edx
+ adcl 1148(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1152(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1156(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 1160(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1164(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1168(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1172(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1176(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1180(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 1184(%esp), %ebp
+ adcl 1188(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1088(%esp), %ecx
+ movl 1452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ addl 1088(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %esi, %edi
+ adcl 1104(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1124(%esp), %esi
+ adcl 1128(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1136(%esp), %ebp
+ movl 1448(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1032(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1080(%esp), %eax
+ movl 52(%esp), %edx # 4-byte Reload
+ addl 1032(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1036(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1040(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 1044(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1048(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1052(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1056(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1060(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1064(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1072(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1076(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %edi
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 976(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 976(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1004(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1012(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1024(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 920(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 968(%esp), %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ addl 920(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 924(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 928(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 932(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 936(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 940(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 944(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 952(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 956(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 960(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 964(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 864(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 864(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 884(%esp), %ebp
+ adcl 888(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 900(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 912(%esp), %edi
+ movl 1448(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 808(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 856(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 808(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 824(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl 828(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 832(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ addl 752(%esp), %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 760(%esp), %edi
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 764(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 776(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 792(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1448(%esp), %ecx
+ movl %ecx, %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1444(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ movl 744(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ adcl 700(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 704(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 708(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 712(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 716(%esp), %esi
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 720(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 724(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 728(%esp), %edi
+ adcl 732(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 736(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 740(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 640(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 648(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 660(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 668(%esp), %esi
+ adcl 672(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 676(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 584(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 632(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 584(%esp), %ecx
+ adcl 588(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 596(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 608(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 616(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 620(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 528(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 528(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 540(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 564(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 568(%esp), %edi
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 572(%esp), %esi
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 576(%esp), %ebp
+ movl 1448(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 520(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 472(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 508(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 512(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 516(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 416(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 432(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 440(%esp), %ebp
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 444(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 408(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 360(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 372(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 380(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 304(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 312(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 320(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 328(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 296(%esp), %edx
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 248(%esp), %ecx
+ adcl 252(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 260(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 272(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 192(%esp), %esi
+ adcl 196(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 200(%esp), %edi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 204(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 216(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 224(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 184(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 136(%esp), %ecx
+ adcl 140(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 144(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 160(%esp), %edi
+ adcl 164(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 168(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 80(%esp), %ecx
+ movl 1452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ addl 80(%esp), %esi
+ movl 56(%esp), %esi # 4-byte Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 92(%esp), %esi
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 104(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 112(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1452(%esp), %ebp
+ subl (%ebp), %edx
+ movl %ecx, %eax
+ sbbl 4(%ebp), %eax
+ movl %esi, %ebx
+ sbbl 8(%ebp), %ebx
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl 40(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ sbbl 28(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ sbbl 32(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ sbbl 36(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 40(%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 44(%ebp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ movl 76(%esp), %ebp # 4-byte Reload
+ js .LBB179_2
+# BB#1:
+ movl %edx, %ebp
+.LBB179_2:
+ movl 1440(%esp), %edx
+ movl %ebp, (%edx)
+ movl 68(%esp), %edi # 4-byte Reload
+ js .LBB179_4
+# BB#3:
+ movl %eax, %edi
+.LBB179_4:
+ movl %edi, 4(%edx)
+ js .LBB179_6
+# BB#5:
+ movl %ebx, %esi
+.LBB179_6:
+ movl %esi, 8(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB179_8
+# BB#7:
+ movl %ecx, %eax
+.LBB179_8:
+ movl %eax, 12(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB179_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB179_10:
+ movl %eax, 16(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB179_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB179_12:
+ movl %eax, 20(%edx)
+ movl 32(%esp), %eax # 4-byte Reload
+ js .LBB179_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB179_14:
+ movl %eax, 24(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB179_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB179_16:
+ movl %eax, 28(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB179_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB179_18:
+ movl %eax, 32(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB179_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB179_20:
+ movl %eax, 36(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB179_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB179_22:
+ movl %eax, 40(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB179_24
+# BB#23:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB179_24:
+ movl %eax, 44(%edx)
+ addl $1420, %esp # imm = 0x58C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end179:
+ .size mcl_fp_montNF12Lbmi2, .Lfunc_end179-mcl_fp_montNF12Lbmi2
+
+ .globl mcl_fp_montRed12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed12Lbmi2,@function
+mcl_fp_montRed12Lbmi2: # @mcl_fp_montRed12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $828, %esp # imm = 0x33C
+ calll .L180$pb
+.L180$pb:
+ popl %eax
+.Ltmp31:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp31-.L180$pb), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 856(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 852(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 88(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 92(%ecx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 88(%ecx), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 80(%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 68(%ecx), %edi
+ movl %edi, 140(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 148(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 48(%ecx), %edi
+ movl %edi, 152(%esp) # 4-byte Spill
+ movl 44(%ecx), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 32(%ecx), %edi
+ movl 28(%ecx), %esi
+ movl 24(%ecx), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 20(%ecx), %ebp
+ movl 16(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 12(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 8(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 776(%esp), %ecx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ movl 88(%esp), %eax # 4-byte Reload
+ addl 776(%esp), %eax
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 796(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 804(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl 808(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 720(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 720(%esp), %esi
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 724(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 752(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl $0, %edi
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 664(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 692(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 696(%esp), %ebp
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl 712(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 608(%esp), %esi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 612(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 636(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 144(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 552(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, %esi
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 496(%esp), %edi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ebp # 4-byte Reload
+ adcl 528(%esp), %ebp
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 532(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 440(%esp), %esi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl 468(%esp), %ebp
+ movl %ebp, 156(%esp) # 4-byte Spill
+ adcl 472(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %esi # 4-byte Reload
+ adcl 476(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 856(%esp), %eax
+ movl %eax, %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 384(%esp), %edi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 388(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl 400(%esp), %ebp
+ movl 152(%esp), %edi # 4-byte Reload
+ adcl 404(%esp), %edi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl 416(%esp), %esi
+ movl %esi, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %esi # 4-byte Reload
+ adcl 424(%esp), %esi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ movl 100(%esp), %eax # 4-byte Reload
+ addl 328(%esp), %eax
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 336(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl 340(%esp), %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ adcl 344(%esp), %edi
+ movl %edi, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 348(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 352(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 356(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 360(%esp), %ebp
+ adcl 364(%esp), %esi
+ movl %esi, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 368(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 376(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %eax, %esi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 272(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 280(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 284(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 288(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 292(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 296(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 300(%esp), %esi
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 304(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 308(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 312(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 316(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, %ebp
+ movl %eax, %edi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 216(%esp), %edi
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 220(%esp), %ecx
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl 240(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl 248(%esp), %esi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 160(%esp), %edi
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %edx # 4-byte Reload
+ adcl 172(%esp), %edx
+ movl %edx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ebx # 4-byte Reload
+ adcl 176(%esp), %ebx
+ movl %ebx, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 180(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl 188(%esp), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ subl 24(%esp), %edi # 4-byte Folded Reload
+ movl 156(%esp), %esi # 4-byte Reload
+ sbbl 16(%esp), %esi # 4-byte Folded Reload
+ sbbl 20(%esp), %edx # 4-byte Folded Reload
+ sbbl 28(%esp), %ebx # 4-byte Folded Reload
+ sbbl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 140(%esp), %eax # 4-byte Reload
+ sbbl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ sbbl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ sbbl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 132(%esp) # 4-byte Spill
+ sbbl $0, %ebp
+ andl $1, %ebp
+ jne .LBB180_2
+# BB#1:
+ movl %ebx, 148(%esp) # 4-byte Spill
+.LBB180_2:
+ movl %ebp, %ebx
+ testb %bl, %bl
+ movl 152(%esp), %ebx # 4-byte Reload
+ jne .LBB180_4
+# BB#3:
+ movl %edi, %ebx
+.LBB180_4:
+ movl 848(%esp), %edi
+ movl %ebx, (%edi)
+ movl 144(%esp), %ebx # 4-byte Reload
+ jne .LBB180_6
+# BB#5:
+ movl %esi, 156(%esp) # 4-byte Spill
+.LBB180_6:
+ movl 156(%esp), %esi # 4-byte Reload
+ movl %esi, 4(%edi)
+ movl 136(%esp), %esi # 4-byte Reload
+ jne .LBB180_8
+# BB#7:
+ movl %edx, %esi
+.LBB180_8:
+ movl %esi, 8(%edi)
+ movl 148(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%edi)
+ movl 128(%esp), %esi # 4-byte Reload
+ movl 116(%esp), %edx # 4-byte Reload
+ jne .LBB180_10
+# BB#9:
+ movl %ecx, %edx
+.LBB180_10:
+ movl %edx, 16(%edi)
+ movl 120(%esp), %edx # 4-byte Reload
+ movl 140(%esp), %ecx # 4-byte Reload
+ jne .LBB180_12
+# BB#11:
+ movl 84(%esp), %ecx # 4-byte Reload
+.LBB180_12:
+ movl %ecx, 20(%edi)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ jne .LBB180_14
+# BB#13:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB180_14:
+ movl %eax, 24(%edi)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB180_16
+# BB#15:
+ movl 92(%esp), %ebx # 4-byte Reload
+.LBB180_16:
+ movl %ebx, 28(%edi)
+ jne .LBB180_18
+# BB#17:
+ movl 96(%esp), %esi # 4-byte Reload
+.LBB180_18:
+ movl %esi, 32(%edi)
+ jne .LBB180_20
+# BB#19:
+ movl 100(%esp), %edx # 4-byte Reload
+.LBB180_20:
+ movl %edx, 36(%edi)
+ jne .LBB180_22
+# BB#21:
+ movl 112(%esp), %ecx # 4-byte Reload
+.LBB180_22:
+ movl %ecx, 40(%edi)
+ jne .LBB180_24
+# BB#23:
+ movl 132(%esp), %eax # 4-byte Reload
+.LBB180_24:
+ movl %eax, 44(%edi)
+ addl $828, %esp # imm = 0x33C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end180:
+ .size mcl_fp_montRed12Lbmi2, .Lfunc_end180-mcl_fp_montRed12Lbmi2
+
+ .globl mcl_fp_addPre12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre12Lbmi2,@function
+mcl_fp_addPre12Lbmi2: # @mcl_fp_addPre12Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl 40(%eax), %edi
+ movl %esi, 32(%ebx)
+ movl 40(%ecx), %esi
+ adcl %edi, %esi
+ movl %edx, 36(%ebx)
+ movl %esi, 40(%ebx)
+ movl 44(%eax), %eax
+ movl 44(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 44(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end181:
+ .size mcl_fp_addPre12Lbmi2, .Lfunc_end181-mcl_fp_addPre12Lbmi2
+
+ .globl mcl_fp_subPre12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre12Lbmi2,@function
+mcl_fp_subPre12Lbmi2: # @mcl_fp_subPre12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %edi, 32(%ebp)
+ movl 40(%ecx), %edi
+ sbbl %ebx, %edi
+ movl %esi, 36(%ebp)
+ movl %edi, 40(%ebp)
+ movl 44(%edx), %edx
+ movl 44(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 44(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end182:
+ .size mcl_fp_subPre12Lbmi2, .Lfunc_end182-mcl_fp_subPre12Lbmi2
+
+ .globl mcl_fp_shr1_12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_12Lbmi2,@function
+mcl_fp_shr1_12Lbmi2: # @mcl_fp_shr1_12Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 40(%ecx)
+ shrl %eax
+ movl %eax, 44(%ecx)
+ popl %esi
+ retl
+.Lfunc_end183:
+ .size mcl_fp_shr1_12Lbmi2, .Lfunc_end183-mcl_fp_shr1_12Lbmi2
+
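+# mcl_fp_add12Lbmi2 below appears to be modular addition: it stores x + y,
+# then subtracts the modulus (last argument) and keeps the reduced value
+# unless that subtraction borrowed, i.e. z = (x + y) mod p for inputs < p.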
+ .globl mcl_fp_add12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add12Lbmi2,@function
+mcl_fp_add12Lbmi2: # @mcl_fp_add12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 64(%esp), %ebx
+ movl (%ebx), %edx
+ movl 4(%ebx), %ecx
+ movl 60(%esp), %eax
+ addl (%eax), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 4(%eax), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 8(%ebx), %ecx
+ adcl 8(%eax), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 12(%eax), %edx
+ movl 16(%eax), %ecx
+ adcl 12(%ebx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 16(%ebx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 20(%eax), %ecx
+ adcl 20(%ebx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ adcl 24(%ebx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 28(%eax), %ecx
+ adcl 28(%ebx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 32(%eax), %ebp
+ adcl 32(%ebx), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl 36(%eax), %edi
+ adcl 36(%ebx), %edi
+ movl 40(%eax), %esi
+ adcl 40(%ebx), %esi
+ movl 44(%eax), %edx
+ adcl 44(%ebx), %edx
+ movl 56(%esp), %ebx
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%ebx)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%ebx)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%ebx)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%ebx)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%ebx)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%ebx)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%ebx)
+ movl %ebp, 32(%ebx)
+ movl %edi, 36(%ebx)
+ movl %esi, 40(%ebx)
+ movl %edx, 44(%ebx)
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 68(%esp), %ebp
+ subl (%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ sbbl 4(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ sbbl 8(%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ sbbl 12(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ sbbl 16(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 20(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ sbbl 24(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ sbbl 28(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl (%esp), %eax # 4-byte Reload
+ sbbl 32(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 36(%ebp), %edi
+ sbbl 40(%ebp), %esi
+ sbbl 44(%ebp), %edx
+ sbbl $0, %ecx
+ testb $1, %cl
+ jne .LBB184_2
+# BB#1: # %nocarry
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ebx)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ebx)
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebx)
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebx)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebx)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%ebx)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebx)
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, 32(%ebx)
+ movl %edi, 36(%ebx)
+ movl %esi, 40(%ebx)
+ movl %edx, 44(%ebx)
+.LBB184_2: # %carry
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end184:
+ .size mcl_fp_add12Lbmi2, .Lfunc_end184-mcl_fp_add12Lbmi2
+
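+# mcl_fp_addNF12Lbmi2 below performs the same addition, but the reduction is
+# decided by the sign of (x + y) - p rather than by a carry-out (valid when
+# the modulus leaves the top bit free); the result is selected limb-by-limb
+# with js-guarded moves.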
+ .globl mcl_fp_addNF12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF12Lbmi2,@function
+mcl_fp_addNF12Lbmi2: # @mcl_fp_addNF12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 116(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ movl 112(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 4(%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 40(%esi), %ebp
+ movl 36(%esi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 20(%esi), %ebx
+ movl 16(%esi), %edi
+ movl 12(%esi), %ecx
+ movl 8(%esi), %eax
+ adcl 8(%edx), %eax
+ adcl 12(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 16(%edx), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 20(%edx), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 24(%edx), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 28(%edx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 32(%edx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 36(%edx), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl %eax, %esi
+ adcl 40(%edx), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 44(%edx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp
+ movl 60(%esp), %edx # 4-byte Reload
+ subl (%ebp), %edx
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 4(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ sbbl 8(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 16(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ sbbl 20(%ebp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edi
+ sbbl 40(%ebp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ sbbl 44(%ebp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ movl 60(%esp), %edi # 4-byte Reload
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ js .LBB185_2
+# BB#1:
+ movl %edx, %edi
+.LBB185_2:
+ movl 108(%esp), %edx
+ movl %edi, (%edx)
+ movl 64(%esp), %edi # 4-byte Reload
+ js .LBB185_4
+# BB#3:
+ movl (%esp), %edi # 4-byte Reload
+.LBB185_4:
+ movl %edi, 4(%edx)
+ movl %eax, %ebp
+ js .LBB185_6
+# BB#5:
+ movl 4(%esp), %esi # 4-byte Reload
+.LBB185_6:
+ movl %esi, 8(%edx)
+ movl %ecx, %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ js .LBB185_8
+# BB#7:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB185_8:
+ movl %ecx, 12(%edx)
+ movl 76(%esp), %ebx # 4-byte Reload
+ movl 84(%esp), %edi # 4-byte Reload
+ js .LBB185_10
+# BB#9:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB185_10:
+ movl %eax, 16(%edx)
+ movl 80(%esp), %ecx # 4-byte Reload
+ js .LBB185_12
+# BB#11:
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+.LBB185_12:
+ movl 56(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%edx)
+ js .LBB185_14
+# BB#13:
+ movl 20(%esp), %ebp # 4-byte Reload
+.LBB185_14:
+ movl %ebp, 24(%edx)
+ js .LBB185_16
+# BB#15:
+ movl 24(%esp), %edi # 4-byte Reload
+.LBB185_16:
+ movl %edi, 28(%edx)
+ js .LBB185_18
+# BB#17:
+ movl 28(%esp), %ebx # 4-byte Reload
+.LBB185_18:
+ movl %ebx, 32(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB185_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB185_20:
+ movl %eax, 36(%edx)
+ js .LBB185_22
+# BB#21:
+ movl 36(%esp), %esi # 4-byte Reload
+.LBB185_22:
+ movl %esi, 40(%edx)
+ js .LBB185_24
+# BB#23:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB185_24:
+ movl %ecx, 44(%edx)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end185:
+ .size mcl_fp_addNF12Lbmi2, .Lfunc_end185-mcl_fp_addNF12Lbmi2
+
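+# mcl_fp_sub12Lbmi2 below appears to be modular subtraction: it stores x - y
+# and, if the subtraction borrowed, re-adds the modulus in the carry branch,
+# i.e. z = (x - y) mod p for inputs < p.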
+ .globl mcl_fp_sub12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub12Lbmi2,@function
+mcl_fp_sub12Lbmi2: # @mcl_fp_sub12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ xorl %ebx, %ebx
+ movl 68(%esp), %edi
+ subl (%edi), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 28(%esi), %edx
+ sbbl 28(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 32(%esi), %ecx
+ sbbl 32(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 40(%esi), %ebp
+ sbbl 40(%edi), %ebp
+ movl 44(%esi), %esi
+ sbbl 44(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 60(%esp), %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl %edx, 28(%ebx)
+ movl %ecx, 32(%ebx)
+ movl %eax, 36(%ebx)
+ movl %ebp, 40(%ebx)
+ movl %esi, 44(%ebx)
+ je .LBB186_2
+# BB#1: # %carry
+ movl %esi, %edi
+ movl 72(%esp), %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl %eax, 36(%ebx)
+ movl 40(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 40(%ebx)
+ movl 44(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 44(%ebx)
+.LBB186_2: # %nocarry
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end186:
+ .size mcl_fp_sub12Lbmi2, .Lfunc_end186-mcl_fp_sub12Lbmi2
+
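+# mcl_fp_subNF12Lbmi2 below computes x - y branch-free: the sign of the final
+# borrow is smeared into a mask (sarl $31) that is ANDed with the modulus
+# limbs, so p is added back only when the difference went negative.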
+ .globl mcl_fp_subNF12Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF12Lbmi2,@function
+mcl_fp_subNF12Lbmi2: # @mcl_fp_subNF12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 96(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ movl 100(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ sarl $31, %eax
+ movl %eax, %edx
+ addl %edx, %edx
+ movl %eax, %edi
+ adcl %edi, %edi
+ movl %eax, %ebp
+ adcl %ebp, %ebp
+ movl %eax, %esi
+ adcl %esi, %esi
+ shrl $31, %ecx
+ orl %edx, %ecx
+ movl 104(%esp), %edx
+ andl 12(%edx), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ andl 8(%edx), %ebp
+ andl 4(%edx), %edi
+ andl (%edx), %ecx
+ movl 44(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 40(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 36(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 32(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 28(%edx), %esi
+ andl %eax, %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 24(%edx), %ebx
+ andl %eax, %ebx
+ movl 20(%edx), %esi
+ andl %eax, %esi
+ andl 16(%edx), %eax
+ addl 48(%esp), %ecx # 4-byte Folded Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 92(%esp), %edx
+ movl %ecx, (%edx)
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 4(%edx)
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 8(%edx)
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 12(%edx)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 16(%edx)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 20(%edx)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 24(%edx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 28(%edx)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%edx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%edx)
+ movl %eax, 40(%edx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%edx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end187:
+ .size mcl_fp_subNF12Lbmi2, .Lfunc_end187-mcl_fp_subNF12Lbmi2
+
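+# mcl_fpDbl_add12Lbmi2 below adds two 24-limb (double-width, 768-bit) values.
+# The low 12 limbs of the sum are stored directly; the high 12 limbs are
+# conditionally reduced by subtracting the modulus when that subtraction does
+# not underflow.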
+ .globl mcl_fpDbl_add12Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add12Lbmi2,@function
+mcl_fpDbl_add12Lbmi2: # @mcl_fpDbl_add12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 116(%esp), %ecx
+ movl 112(%esp), %edi
+ movl 12(%edi), %esi
+ movl 16(%edi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%edi), %ebp
+ movl 108(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%ecx), %esi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 56(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%edi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%edi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebx, %esi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%edi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %esi, 36(%eax)
+ movl 44(%edi), %esi
+ adcl %ebx, %esi
+ movl 48(%ecx), %ebx
+ movl %edx, 40(%eax)
+ movl 48(%edi), %edx
+ adcl %ebx, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 52(%ecx), %ebx
+ movl %esi, 44(%eax)
+ movl 52(%edi), %eax
+ adcl %ebx, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 56(%edi), %eax
+ adcl %ebp, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%ecx), %eax
+ movl 60(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl 64(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl 68(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl 72(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl 76(%edi), %edx
+ adcl %eax, %edx
+ movl 80(%ecx), %esi
+ movl 80(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 84(%ecx), %ebx
+ movl 84(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 88(%ecx), %ebp
+ movl 88(%edi), %ebx
+ adcl %ebp, %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 92(%ecx), %ecx
+ movl 92(%edi), %edi
+ adcl %ecx, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 120(%esp), %ebp
+ movl 72(%esp), %edi # 4-byte Reload
+ subl (%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %edx, %edi
+ sbbl 28(%ebp), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 32(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl 36(%ebp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl 44(%esp), %ebx # 4-byte Reload
+ sbbl 40(%ebp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ sbbl 44(%ebp), %edi
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB188_2
+# BB#1:
+ movl %edi, %ebx
+.LBB188_2:
+ testb %cl, %cl
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB188_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB188_4:
+ movl 108(%esp), %eax
+ movl %ecx, 48(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl %ebp, 64(%eax)
+ movl %edi, 68(%eax)
+ movl %esi, 72(%eax)
+ movl %edx, 76(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ jne .LBB188_6
+# BB#5:
+ movl 32(%esp), %edx # 4-byte Reload
+.LBB188_6:
+ movl %edx, 80(%eax)
+ movl 52(%esp), %edx # 4-byte Reload
+ jne .LBB188_8
+# BB#7:
+ movl 36(%esp), %edx # 4-byte Reload
+.LBB188_8:
+ movl %edx, 84(%eax)
+ jne .LBB188_10
+# BB#9:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB188_10:
+ movl %ecx, 88(%eax)
+ movl %ebx, 92(%eax)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end188:
+ .size mcl_fpDbl_add12Lbmi2, .Lfunc_end188-mcl_fpDbl_add12Lbmi2
+
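+# mcl_fpDbl_sub12Lbmi2 below subtracts two 24-limb values. The low half of the
+# difference is stored directly; if the overall subtraction borrowed, the
+# modulus is added back into the high 12 limbs (p or 0 is chosen per limb
+# before a single carry chain).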
+ .globl mcl_fpDbl_sub12Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub12Lbmi2,@function
+mcl_fpDbl_sub12Lbmi2: # @mcl_fpDbl_sub12Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $76, %esp
+ movl 100(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %edx
+ movl 104(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %edx
+ movl 8(%esi), %edi
+ sbbl 8(%ebx), %edi
+ movl 96(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%esi), %eax
+ sbbl 12(%ebx), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%esi), %edx
+ sbbl 16(%ebx), %edx
+ movl %edi, 8(%ecx)
+ movl 20(%ebx), %edi
+ movl %eax, 12(%ecx)
+ movl 20(%esi), %eax
+ sbbl %edi, %eax
+ movl 24(%ebx), %edi
+ movl %edx, 16(%ecx)
+ movl 24(%esi), %edx
+ sbbl %edi, %edx
+ movl 28(%ebx), %edi
+ movl %eax, 20(%ecx)
+ movl 28(%esi), %eax
+ sbbl %edi, %eax
+ movl 32(%ebx), %edi
+ movl %edx, 24(%ecx)
+ movl 32(%esi), %edx
+ sbbl %edi, %edx
+ movl 36(%ebx), %edi
+ movl %eax, 28(%ecx)
+ movl 36(%esi), %eax
+ sbbl %edi, %eax
+ movl 40(%ebx), %edi
+ movl %edx, 32(%ecx)
+ movl 40(%esi), %edx
+ sbbl %edi, %edx
+ movl 44(%ebx), %edi
+ movl %eax, 36(%ecx)
+ movl 44(%esi), %eax
+ sbbl %edi, %eax
+ movl 48(%ebx), %edi
+ movl %edx, 40(%ecx)
+ movl 48(%esi), %edx
+ sbbl %edi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 52(%ebx), %edx
+ movl %eax, 44(%ecx)
+ movl 52(%esi), %eax
+ sbbl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 56(%ebx), %eax
+ movl 56(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 60(%ebx), %eax
+ movl 60(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 64(%ebx), %eax
+ movl 64(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 68(%ebx), %eax
+ movl 68(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 72(%ebx), %eax
+ movl 72(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 76(%ebx), %eax
+ movl 76(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 80(%ebx), %eax
+ movl 80(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 84(%ebx), %eax
+ movl 84(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 88(%ebx), %eax
+ movl 88(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 92(%ebx), %eax
+ movl 92(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 108(%esp), %ebp
+ jne .LBB189_1
+# BB#2:
+ movl $0, 36(%esp) # 4-byte Folded Spill
+ jmp .LBB189_3
+.LBB189_1:
+ movl 44(%ebp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+.LBB189_3:
+ testb %al, %al
+ jne .LBB189_4
+# BB#5:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ movl $0, %esi
+ jmp .LBB189_6
+.LBB189_4:
+ movl (%ebp), %esi
+ movl 4(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB189_6:
+ jne .LBB189_7
+# BB#8:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB189_9
+.LBB189_7:
+ movl 40(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB189_9:
+ jne .LBB189_10
+# BB#11:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB189_12
+.LBB189_10:
+ movl 36(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB189_12:
+ jne .LBB189_13
+# BB#14:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB189_15
+.LBB189_13:
+ movl 32(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB189_15:
+ jne .LBB189_16
+# BB#17:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB189_18
+.LBB189_16:
+ movl 28(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB189_18:
+ jne .LBB189_19
+# BB#20:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB189_21
+.LBB189_19:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB189_21:
+ jne .LBB189_22
+# BB#23:
+ movl $0, %ebx
+ jmp .LBB189_24
+.LBB189_22:
+ movl 20(%ebp), %ebx
+.LBB189_24:
+ jne .LBB189_25
+# BB#26:
+ movl $0, %eax
+ jmp .LBB189_27
+.LBB189_25:
+ movl 16(%ebp), %eax
+.LBB189_27:
+ jne .LBB189_28
+# BB#29:
+ movl %ebp, %edx
+ movl $0, %ebp
+ jmp .LBB189_30
+.LBB189_28:
+ movl %ebp, %edx
+ movl 12(%edx), %ebp
+.LBB189_30:
+ jne .LBB189_31
+# BB#32:
+ xorl %edx, %edx
+ jmp .LBB189_33
+.LBB189_31:
+ movl 8(%edx), %edx
+.LBB189_33:
+ addl 32(%esp), %esi # 4-byte Folded Reload
+ movl 12(%esp), %edi # 4-byte Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 52(%ecx)
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, 56(%ecx)
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 60(%ecx)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 68(%ecx)
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 76(%ecx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 80(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 84(%ecx)
+ movl %eax, 88(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%ecx)
+ addl $76, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end189:
+ .size mcl_fpDbl_sub12Lbmi2, .Lfunc_end189-mcl_fpDbl_sub12Lbmi2
+
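+# .LmulPv416x32 below is a local helper: it multiplies a 13-limb (416-bit)
+# value (pointer in %edx) by a 32-bit scalar (first stack slot) using BMI2
+# mulx, writes the 14-limb product through %ecx, and returns that pointer in
+# %eax. This register convention is inferred from the call sites.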
+ .align 16, 0x90
+ .type .LmulPv416x32,@function
+.LmulPv416x32: # @mulPv416x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl %edx, %eax
+ movl 64(%esp), %ebx
+ movl %ebx, %edx
+ mulxl 4(%eax), %esi, %ebp
+ movl %ebx, %edx
+ mulxl (%eax), %edi, %edx
+ movl %edi, 40(%esp) # 4-byte Spill
+ addl %esi, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 8(%eax), %edx, %esi
+ adcl %ebp, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 12(%eax), %edx, %edi
+ adcl %esi, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 16(%eax), %edx, %esi
+ adcl %edi, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 20(%eax), %edx, %edi
+ adcl %esi, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 24(%eax), %edx, %esi
+ adcl %edi, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 28(%eax), %edx, %edi
+ adcl %esi, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 32(%eax), %edx, %esi
+ adcl %edi, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 36(%eax), %edi, %ebp
+ adcl %esi, %edi
+ movl %ebx, %edx
+ mulxl 40(%eax), %esi, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl %ebp, %esi
+ movl %ebx, %edx
+ mulxl 44(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 36(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 32(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl 28(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%ecx)
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%ecx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%ecx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%ecx)
+ movl %edi, 36(%ecx)
+ movl %esi, 40(%ecx)
+ movl %edx, 44(%ecx)
+ movl %ebx, %edx
+ mulxl 48(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ adcl $0, %edx
+ movl %edx, 52(%ecx)
+ movl %ecx, %eax
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end190:
+ .size .LmulPv416x32, .Lfunc_end190-.LmulPv416x32
+
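+# mcl_fp_mulUnitPre13Lbmi2 below multiplies a 13-limb value by a single 32-bit
+# word: it calls .LmulPv416x32 into a stack buffer and copies the 14-limb
+# result to the output.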
+ .globl mcl_fp_mulUnitPre13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre13Lbmi2,@function
+mcl_fp_mulUnitPre13Lbmi2: # @mcl_fp_mulUnitPre13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+ calll .L191$pb
+.L191$pb:
+ popl %ebx
+.Ltmp32:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp32-.L191$pb), %ebx
+ movl 136(%esp), %eax
+ movl %eax, (%esp)
+ leal 48(%esp), %ecx
+ movl 132(%esp), %edx
+ calll .LmulPv416x32
+ movl 100(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp
+ movl 64(%esp), %ebx
+ movl 60(%esp), %edi
+ movl 56(%esp), %esi
+ movl 48(%esp), %edx
+ movl 52(%esp), %ecx
+ movl 128(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end191:
+ .size mcl_fp_mulUnitPre13Lbmi2, .Lfunc_end191-mcl_fp_mulUnitPre13Lbmi2
+
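+# mcl_fpDbl_mulPre13Lbmi2 below is a schoolbook 13x13-limb multiplication: for
+# each 32-bit limb of the second operand it calls .LmulPv416x32 and folds the
+# partial product into the accumulator with adcl, emitting one limb of the
+# 26-limb result per iteration.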
+ .globl mcl_fpDbl_mulPre13Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre13Lbmi2,@function
+mcl_fpDbl_mulPre13Lbmi2: # @mcl_fpDbl_mulPre13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $844, %esp # imm = 0x34C
+ calll .L192$pb
+.L192$pb:
+ popl %edi
+.Ltmp33:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp33-.L192$pb), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 872(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 868(%esp), %edx
+ movl %edx, %esi
+ movl %edi, %ebx
+ calll .LmulPv416x32
+ movl 836(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 828(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 784(%esp), %eax
+ movl 788(%esp), %ebp
+ movl 864(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 872(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 728(%esp), %ecx
+ movl %esi, %edx
+ movl %edi, %ebx
+ calll .LmulPv416x32
+ addl 728(%esp), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 780(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 776(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 772(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 764(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 760(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 748(%esp), %edi
+ movl 744(%esp), %esi
+ movl 740(%esp), %edx
+ movl 732(%esp), %eax
+ movl 736(%esp), %ecx
+ movl 864(%esp), %ebp
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 724(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 720(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 716(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 712(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 708(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 696(%esp), %ebx
+ movl 692(%esp), %edi
+ movl 688(%esp), %esi
+ movl 684(%esp), %edx
+ movl 676(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 616(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 616(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 660(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 656(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 652(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 648(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 644(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 640(%esp), %ebx
+ movl 636(%esp), %edi
+ movl 632(%esp), %esi
+ movl 628(%esp), %edx
+ movl 620(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 624(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 560(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 584(%esp), %ebx
+ movl 580(%esp), %edi
+ movl 576(%esp), %esi
+ movl 572(%esp), %edx
+ movl 564(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 568(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 504(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 528(%esp), %ebx
+ movl 524(%esp), %edi
+ movl 520(%esp), %esi
+ movl 516(%esp), %edx
+ movl 508(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 512(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 448(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 448(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 472(%esp), %ebp
+ movl 468(%esp), %edi
+ movl 464(%esp), %esi
+ movl 460(%esp), %edx
+ movl 452(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 456(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 392(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 444(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %ebx
+ movl 412(%esp), %edi
+ movl 408(%esp), %esi
+ movl 404(%esp), %edx
+ movl 396(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 400(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 336(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 336(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 360(%esp), %ebp
+ movl 356(%esp), %edi
+ movl 352(%esp), %esi
+ movl 348(%esp), %edx
+ movl 340(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 280(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 304(%esp), %ebx
+ movl 300(%esp), %edi
+ movl 296(%esp), %esi
+ movl 292(%esp), %edx
+ movl 284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 288(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 224(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 248(%esp), %ebx
+ movl 244(%esp), %edi
+ movl 240(%esp), %esi
+ movl 236(%esp), %edx
+ movl 228(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 232(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %edi
+ movl 44(%edi), %eax
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 868(%esp), %eax
+ movl %eax, %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %esi # 4-byte Reload
+ addl 168(%esp), %esi
+ movl 220(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 204(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 196(%esp), %ebp
+ movl 192(%esp), %ebx
+ movl 188(%esp), %edi
+ movl 184(%esp), %edx
+ movl 180(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 176(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %esi, 44(%eax)
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 104(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 112(%esp), %esi
+ movl %esi, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 120(%esp), %edi
+ movl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 148(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 144(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 136(%esp), %ebx
+ movl 132(%esp), %esi
+ movl 128(%esp), %edx
+ movl 124(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %ebp, 48(%eax)
+ movl 68(%esp), %ebp # 4-byte Reload
+ movl %ebp, 52(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 56(%eax)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ adcl 104(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 68(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 72(%eax)
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 76(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 80(%eax)
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 84(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl %ecx, 92(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 100(%eax)
+ addl $844, %esp # imm = 0x34C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end192:
+ .size mcl_fpDbl_mulPre13Lbmi2, .Lfunc_end192-mcl_fpDbl_mulPre13Lbmi2
+
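+# mcl_fpDbl_sqrPre13Lbmi2 below squares a 13-limb value with the same
+# schoolbook loop, multiplying the operand by each of its own limbs via
+# .LmulPv416x32.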
+ .globl mcl_fpDbl_sqrPre13Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre13Lbmi2,@function
+mcl_fpDbl_sqrPre13Lbmi2: # @mcl_fpDbl_sqrPre13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $844, %esp # imm = 0x34C
+ calll .L193$pb
+.L193$pb:
+ popl %ebx
+.Ltmp34:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp34-.L193$pb), %ebx
+ movl %ebx, 108(%esp) # 4-byte Spill
+ movl 868(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl %edx, %edi
+ movl %ebx, %esi
+ calll .LmulPv416x32
+ movl 836(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 828(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 784(%esp), %eax
+ movl 788(%esp), %ebp
+ movl 864(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 728(%esp), %ecx
+ movl %esi, %ebx
+ calll .LmulPv416x32
+ addl 728(%esp), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 780(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 776(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 772(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 764(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 760(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 748(%esp), %edi
+ movl 744(%esp), %esi
+ movl 740(%esp), %edx
+ movl 732(%esp), %eax
+ movl 736(%esp), %ecx
+ movl 864(%esp), %ebp
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 724(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 720(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 716(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 712(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 708(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 696(%esp), %ebx
+ movl 692(%esp), %edi
+ movl 688(%esp), %esi
+ movl 684(%esp), %edx
+ movl 676(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 616(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 616(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 660(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 656(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 652(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 648(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 644(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 640(%esp), %ebx
+ movl 636(%esp), %edi
+ movl 632(%esp), %esi
+ movl 628(%esp), %edx
+ movl 620(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 624(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 560(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 584(%esp), %ebx
+ movl 580(%esp), %edi
+ movl 576(%esp), %esi
+ movl 572(%esp), %edx
+ movl 564(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 568(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 504(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 528(%esp), %ebx
+ movl 524(%esp), %edi
+ movl 520(%esp), %esi
+ movl 516(%esp), %edx
+ movl 508(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 512(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 448(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 448(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 472(%esp), %ebp
+ movl 468(%esp), %edi
+ movl 464(%esp), %esi
+ movl 460(%esp), %edx
+ movl 452(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 456(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 392(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 444(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %ebx
+ movl 412(%esp), %edi
+ movl 408(%esp), %esi
+ movl 404(%esp), %edx
+ movl 396(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 400(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 336(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 336(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 360(%esp), %ebp
+ movl 356(%esp), %edi
+ movl 352(%esp), %esi
+ movl 348(%esp), %edx
+ movl 340(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 280(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 304(%esp), %ebx
+ movl 300(%esp), %edi
+ movl 296(%esp), %esi
+ movl 292(%esp), %edx
+ movl 284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 288(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 224(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 248(%esp), %ebx
+ movl 244(%esp), %edi
+ movl 240(%esp), %esi
+ movl 236(%esp), %edx
+ movl 228(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 232(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 44(%edx), %eax
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %esi # 4-byte Reload
+ addl 168(%esp), %esi
+ movl 220(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 204(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 196(%esp), %ebp
+ movl 192(%esp), %ebx
+ movl 188(%esp), %edi
+ movl 184(%esp), %edx
+ movl 180(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 176(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %esi, 44(%eax)
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 104(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 48(%edx), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 112(%esp), %esi
+ movl %esi, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 120(%esp), %edi
+ movl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 148(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 144(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 136(%esp), %ebx
+ movl 132(%esp), %esi
+ movl 128(%esp), %edx
+ movl 124(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %ebp, 48(%eax)
+ movl 68(%esp), %ebp # 4-byte Reload
+ movl %ebp, 52(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 56(%eax)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ adcl 104(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 68(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 72(%eax)
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 76(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 80(%eax)
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 84(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl %ecx, 92(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 100(%eax)
+ addl $844, %esp # imm = 0x34C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end193:
+ .size mcl_fpDbl_sqrPre13Lbmi2, .Lfunc_end193-mcl_fpDbl_sqrPre13Lbmi2
+
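+# mcl_fp_mont13Lbmi2: 13-limb (416-bit) Montgomery multiplication.
+# The code below appears to follow a CIOS-style loop: -4 off the modulus pointer
+# supplies the Montgomery constant n' (kept in 28(%esp)); for each limb of the
+# second operand it computes x*y[i] with .LmulPv416x32, derives q = (acc mod 2^32)*n'
+# with imull, folds in q*p with a second .LmulPv416x32 call against the modulus,
+# and carries the word-shifted accumulator forward.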
+ .globl mcl_fp_mont13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont13Lbmi2,@function
+mcl_fp_mont13Lbmi2: # @mcl_fp_mont13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1548, %esp # imm = 0x60C
+ calll .L194$pb
+.L194$pb:
+ popl %ebx
+.Ltmp35:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp35-.L194$pb), %ebx
+ movl 1580(%esp), %eax
+ movl -4(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1488(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1488(%esp), %esi
+ movl 1492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull %edi, %eax
+ movl 1540(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1536(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1532(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1528(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1524(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1520(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1516(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1512(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1508(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1504(%esp), %edi
+ movl 1500(%esp), %ebp
+ movl 1496(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1432(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1432(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1440(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1444(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 1448(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1472(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1484(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 1576(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1376(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1376(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1388(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1404(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1412(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 1420(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1428(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1320(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1320(%esp), %ecx
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1328(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1336(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 1340(%esp), %esi
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1344(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 1348(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1352(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1356(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1360(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1364(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1368(%esp), %ebp
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1372(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1264(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1280(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1300(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1308(%esp), %ebp
+ adcl 1312(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 1580(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv416x32
+ movl 84(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1208(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1212(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1216(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 1224(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1228(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 1244(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1248(%esp), %edi
+ adcl 1252(%esp), %ebp
+ movl %ebp, %esi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1256(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1260(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1152(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1152(%esp), %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1188(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 1192(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1200(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1096(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 1096(%esp), %esi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1100(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1104(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 1108(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1112(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1116(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1120(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1124(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1128(%esp), %edi
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1132(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1136(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1140(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1144(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1148(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1040(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 1040(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1064(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 1068(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 1072(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1084(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %edi
+ addl 984(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 996(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1028(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl 1576(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 936(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 944(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 980(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 872(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %edi
+ addl 872(%esp), %ebp
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 884(%esp), %ebp
+ adcl 888(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 904(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1572(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv416x32
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 816(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 824(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 844(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 848(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 856(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 760(%esp), %ecx
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 764(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 768(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 772(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 776(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 784(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 788(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 792(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 796(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 800(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 804(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 808(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 812(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 36(%esp), %eax # 4-byte Reload
+ addl 704(%esp), %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 708(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 712(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 716(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 720(%esp), %ebp
+ adcl 724(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 728(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 732(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 736(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 740(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 744(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 748(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 752(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %eax, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 648(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 652(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 656(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 660(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 664(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 672(%esp), %edi
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 676(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 592(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 600(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 612(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ adcl 616(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 620(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 536(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 536(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 544(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 560(%esp), %esi
+ adcl 564(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 572(%esp), %edi
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 576(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 480(%esp), %ecx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 500(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 512(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 516(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 424(%esp), %esi
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 428(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 432(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 444(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ addl 368(%esp), %ebp
+ adcl 372(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 376(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 392(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl 52(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 312(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 320(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 328(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 336(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 340(%esp), %edi
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 344(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 256(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 268(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 280(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 284(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 288(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 200(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 208(%esp), %ebp
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 212(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 232(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 236(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 144(%esp), %ecx
+ adcl 148(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 152(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 156(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 176(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %edi
+ addl 88(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 92(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 100(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 104(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ebx # 4-byte Reload
+ adcl 112(%esp), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl 116(%esp), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 120(%esp), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ adcl 124(%esp), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 128(%esp), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 132(%esp), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 136(%esp), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ adcl 140(%esp), %ebx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl 1580(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %ecx
+ sbbl 8(%ebx), %ebp
+ sbbl 12(%ebx), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ sbbl 20(%ebx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ sbbl 24(%ebx), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ sbbl 28(%ebx), %edx
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 32(%ebx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 36(%ebx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 40(%ebx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 44(%ebx), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ sbbl 48(%ebx), %esi
+ movl %esi, %ebx
+ sbbl $0, %edi
+ andl $1, %edi
+ jne .LBB194_2
+# BB#1:
+ movl %edx, 32(%esp) # 4-byte Spill
+.LBB194_2:
+ movl %edi, %edx
+ testb %dl, %dl
+ movl 80(%esp), %edx # 4-byte Reload
+ jne .LBB194_4
+# BB#3:
+ movl %eax, %edx
+.LBB194_4:
+ movl 1568(%esp), %eax
+ movl %edx, (%eax)
+ movl 64(%esp), %esi # 4-byte Reload
+ jne .LBB194_6
+# BB#5:
+ movl %ecx, %esi
+.LBB194_6:
+ movl %esi, 4(%eax)
+ jne .LBB194_8
+# BB#7:
+ movl %ebp, 76(%esp) # 4-byte Spill
+.LBB194_8:
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB194_10
+# BB#9:
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+.LBB194_10:
+ movl 84(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ jne .LBB194_12
+# BB#11:
+ movl 8(%esp), %ebp # 4-byte Reload
+.LBB194_12:
+ movl %ebp, 16(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB194_14
+# BB#13:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB194_14:
+ movl %ecx, 20(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB194_16
+# BB#15:
+ movl 16(%esp), %ecx # 4-byte Reload
+.LBB194_16:
+ movl %ecx, 24(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB194_18
+# BB#17:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB194_18:
+ movl %ecx, 32(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB194_20
+# BB#19:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB194_20:
+ movl %ecx, 36(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB194_22
+# BB#21:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB194_22:
+ movl %ecx, 40(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ jne .LBB194_24
+# BB#23:
+ movl 72(%esp), %ecx # 4-byte Reload
+.LBB194_24:
+ movl %ecx, 44(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB194_26
+# BB#25:
+ movl %ebx, %ecx
+.LBB194_26:
+ movl %ecx, 48(%eax)
+ addl $1548, %esp # imm = 0x60C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end194:
+ .size mcl_fp_mont13Lbmi2, .Lfunc_end194-mcl_fp_mont13Lbmi2
+
+ .globl mcl_fp_montNF13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF13Lbmi2,@function
+mcl_fp_montNF13Lbmi2: # @mcl_fp_montNF13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1548, %esp # imm = 0x60C
+ calll .L195$pb
+.L195$pb:
+ popl %ebx
+.Ltmp36:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp36-.L195$pb), %ebx
+ movl 1580(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1488(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1488(%esp), %edi
+ movl 1492(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1540(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1536(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1532(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1528(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1524(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1520(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1516(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1512(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1508(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1504(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1500(%esp), %esi
+ movl 1496(%esp), %ebp
+ movl %eax, (%esp)
+ leal 1432(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1432(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1440(%esp), %ebp
+ adcl 1444(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1448(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1472(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 1484(%esp), %edi
+ movl 1576(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1376(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1428(%esp), %ecx
+ movl 80(%esp), %edx # 4-byte Reload
+ addl 1376(%esp), %edx
+ adcl 1380(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1412(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1424(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1320(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1320(%esp), %esi
+ adcl 1324(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 1340(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1360(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1316(%esp), %eax
+ addl 1264(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1268(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1272(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1276(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 1280(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 1284(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1288(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1296(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 1300(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1304(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1308(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1312(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1208(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 1228(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1244(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1248(%esp), %esi
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 1252(%esp), %edi
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1256(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1152(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1204(%esp), %eax
+ movl 64(%esp), %edx # 4-byte Reload
+ addl 1152(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1156(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1160(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1164(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1168(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1172(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1176(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1180(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1184(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1188(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 1192(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ adcl 1196(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1200(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1096(%esp), %ecx
+ movl 1580(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv416x32
+ addl 1096(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 1116(%esp), %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1120(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1140(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 1148(%esp), %ebp
+ movl 1576(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1040(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1092(%esp), %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ addl 1040(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1044(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1048(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1052(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 1056(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 1060(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1064(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1072(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1076(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1084(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1088(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl %eax, %esi
+ adcl $0, %esi
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 984(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 996(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1008(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1036(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 980(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 932(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 936(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 940(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 944(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 948(%esp), %ebp
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 952(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 956(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 960(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 964(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 968(%esp), %esi
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 972(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 976(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 872(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 872(%esp), %edi
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 876(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 892(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 912(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 868(%esp), %edx
+ addl 816(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 832(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 836(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 860(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 760(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 780(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 784(%esp), %esi
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 788(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 804(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 756(%esp), %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 704(%esp), %ecx
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 708(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 712(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 716(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 720(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 724(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 728(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 732(%esp), %esi
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 736(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 740(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 744(%esp), %ebp
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 748(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 752(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 648(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 676(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 688(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 696(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 644(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 592(%esp), %ecx
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 596(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 624(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 636(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 536(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 536(%esp), %edi
+ adcl 540(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 556(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 564(%esp), %esi
+ adcl 568(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 572(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 532(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 480(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 496(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 504(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 508(%esp), %edi
+ adcl 512(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 528(%esp), %ebp
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 424(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 452(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 460(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 472(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 420(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 368(%esp), %ecx
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 372(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 392(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 400(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 404(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 312(%esp), %esi
+ adcl 316(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 320(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 348(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 308(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 256(%esp), %ecx
+ adcl 260(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 272(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 288(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 200(%esp), %esi
+ adcl 204(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 216(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 228(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 196(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 144(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 152(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 156(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 164(%esp), %ebp
+ adcl 168(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 88(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 92(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 100(%esp), %edi
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 104(%esp), %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 132(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1580(%esp), %eax
+ subl (%eax), %edx
+ movl %ecx, %ebp
+ sbbl 4(%eax), %ebp
+ movl %edi, %ecx
+ sbbl 8(%eax), %ecx
+ sbbl 12(%eax), %ebx
+ sbbl 16(%eax), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 20(%eax), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 24(%eax), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 28(%eax), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 32(%eax), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 36(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ sbbl 40(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ sbbl 44(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ sbbl 48(%eax), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl %esi, %eax
+ sarl $31, %eax
+ testl %eax, %eax
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB195_2
+# BB#1:
+ movl %edx, %eax
+.LBB195_2:
+ movl 1568(%esp), %edx
+ movl %eax, (%edx)
+ movl 80(%esp), %esi # 4-byte Reload
+ js .LBB195_4
+# BB#3:
+ movl %ebp, %esi
+.LBB195_4:
+ movl %esi, 4(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB195_6
+# BB#5:
+ movl %ecx, %edi
+.LBB195_6:
+ movl %edi, 8(%edx)
+ js .LBB195_8
+# BB#7:
+ movl %ebx, %eax
+.LBB195_8:
+ movl %eax, 12(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB195_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB195_10:
+ movl %eax, 16(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB195_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB195_12:
+ movl %eax, 20(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB195_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB195_14:
+ movl %eax, 24(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB195_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB195_16:
+ movl %eax, 28(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB195_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB195_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB195_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB195_20:
+ movl %eax, 36(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB195_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB195_22:
+ movl %eax, 40(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB195_24
+# BB#23:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB195_24:
+ movl %eax, 44(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB195_26
+# BB#25:
+ movl 68(%esp), %eax # 4-byte Reload
+.LBB195_26:
+ movl %eax, 48(%edx)
+ addl $1548, %esp # imm = 0x60C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end195:
+ .size mcl_fp_montNF13Lbmi2, .Lfunc_end195-mcl_fp_montNF13Lbmi2
+
+ .globl mcl_fp_montRed13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed13Lbmi2,@function
+mcl_fp_montRed13Lbmi2: # @mcl_fp_montRed13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $892, %esp # imm = 0x37C
+ calll .L196$pb
+.L196$pb:
+ popl %eax
+.Ltmp37:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp37-.L196$pb), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 920(%esp), %edx
+ movl -4(%edx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 916(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ imull %eax, %ebx
+ movl 100(%ecx), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 80(%ecx), %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl %esi, 144(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 68(%ecx), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 64(%ecx), %esi
+ movl %esi, 148(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 152(%esp) # 4-byte Spill
+ movl 56(%ecx), %esi
+ movl %esi, 140(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 44(%ecx), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ movl 40(%ecx), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 36(%ecx), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %edi
+ movl 20(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 12(%ecx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 8(%ecx), %esi
+ movl (%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 832(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 76(%esp), %eax # 4-byte Reload
+ addl 832(%esp), %eax
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 836(%esp), %ecx
+ adcl 840(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 856(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 860(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 776(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ andl $1, %esi
+ addl 776(%esp), %edi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, %edi
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 720(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 720(%esp), %esi
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 724(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 664(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 608(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 612(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 552(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 496(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %ebp # 4-byte Reload
+ adcl 532(%esp), %ebp
+ movl 148(%esp), %edi # 4-byte Reload
+ adcl 536(%esp), %edi
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 440(%esp), %esi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl 472(%esp), %ebp
+ movl %ebp, 152(%esp) # 4-byte Spill
+ adcl 476(%esp), %edi
+ movl %edi, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 384(%esp), %esi
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 388(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %ebp # 4-byte Reload
+ adcl 404(%esp), %ebp
+ movl 140(%esp), %edi # 4-byte Reload
+ adcl 408(%esp), %edi
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %esi # 4-byte Reload
+ adcl 420(%esp), %esi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 920(%esp), %eax
+ movl %eax, %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 328(%esp), %eax
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 344(%esp), %ebp
+ movl %ebp, 156(%esp) # 4-byte Spill
+ adcl 348(%esp), %edi
+ movl %edi, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl 360(%esp), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ movl 72(%esp), %esi # 4-byte Reload
+ imull %esi, %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 272(%esp), %edi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl 120(%esp), %edi # 4-byte Reload
+ adcl 280(%esp), %edi
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 284(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 288(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 292(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 296(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 300(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 304(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 308(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 312(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 316(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull %esi, %eax
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 216(%esp), %ebp
+ movl %edi, %ecx
+ adcl 220(%esp), %ecx
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %ebp # 4-byte Reload
+ adcl 228(%esp), %ebp
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 244(%esp), %edi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 160(%esp), %esi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl 168(%esp), %ebp
+ movl %ebp, 140(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 172(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp # 4-byte Reload
+ adcl 176(%esp), %ebp
+ movl %ebp, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl %edi, %eax
+ adcl 184(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl 156(%esp), %edi # 4-byte Reload
+ subl 12(%esp), %edi # 4-byte Folded Reload
+ sbbl 4(%esp), %ebx # 4-byte Folded Reload
+ sbbl 8(%esp), %ecx # 4-byte Folded Reload
+ sbbl 16(%esp), %ebp # 4-byte Folded Reload
+ sbbl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 132(%esp), %edx # 4-byte Reload
+ sbbl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 144(%esp), %edx # 4-byte Reload
+ sbbl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 136(%esp), %edx # 4-byte Reload
+ sbbl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 116(%esp), %edx # 4-byte Reload
+ sbbl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 112(%esp), %edx # 4-byte Reload
+ sbbl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ sbbl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ sbbl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 120(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl %esi, %eax
+ sbbl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 124(%esp) # 4-byte Spill
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB196_2
+# BB#1:
+ movl %ebp, 148(%esp) # 4-byte Spill
+.LBB196_2:
+ testb %al, %al
+ movl 156(%esp), %ebp # 4-byte Reload
+ jne .LBB196_4
+# BB#3:
+ movl %edi, %ebp
+.LBB196_4:
+ movl 912(%esp), %edi
+ movl %ebp, (%edi)
+ movl 140(%esp), %ebp # 4-byte Reload
+ jne .LBB196_6
+# BB#5:
+ movl %ebx, %ebp
+.LBB196_6:
+ movl %ebp, 4(%edi)
+ movl 152(%esp), %ebx # 4-byte Reload
+ jne .LBB196_8
+# BB#7:
+ movl %ecx, %ebx
+.LBB196_8:
+ movl %ebx, 8(%edi)
+ movl 148(%esp), %esi # 4-byte Reload
+ movl %esi, 12(%edi)
+ movl 116(%esp), %ebx # 4-byte Reload
+ movl 128(%esp), %esi # 4-byte Reload
+ jne .LBB196_10
+# BB#9:
+ movl 72(%esp), %esi # 4-byte Reload
+.LBB196_10:
+ movl %esi, 16(%edi)
+ movl 112(%esp), %esi # 4-byte Reload
+ movl 132(%esp), %edx # 4-byte Reload
+ jne .LBB196_12
+# BB#11:
+ movl 76(%esp), %edx # 4-byte Reload
+.LBB196_12:
+ movl %edx, 20(%edi)
+ movl 96(%esp), %edx # 4-byte Reload
+ movl 144(%esp), %ecx # 4-byte Reload
+ jne .LBB196_14
+# BB#13:
+ movl 80(%esp), %ecx # 4-byte Reload
+.LBB196_14:
+ movl %ecx, 24(%edi)
+ movl 100(%esp), %ecx # 4-byte Reload
+ movl 136(%esp), %eax # 4-byte Reload
+ jne .LBB196_16
+# BB#15:
+ movl 84(%esp), %eax # 4-byte Reload
+.LBB196_16:
+ movl %eax, 28(%edi)
+ movl 92(%esp), %eax # 4-byte Reload
+ jne .LBB196_18
+# BB#17:
+ movl 88(%esp), %ebx # 4-byte Reload
+.LBB196_18:
+ movl %ebx, 32(%edi)
+ jne .LBB196_20
+# BB#19:
+ movl 104(%esp), %esi # 4-byte Reload
+.LBB196_20:
+ movl %esi, 36(%edi)
+ jne .LBB196_22
+# BB#21:
+ movl 108(%esp), %edx # 4-byte Reload
+.LBB196_22:
+ movl %edx, 40(%edi)
+ jne .LBB196_24
+# BB#23:
+ movl 120(%esp), %ecx # 4-byte Reload
+.LBB196_24:
+ movl %ecx, 44(%edi)
+ jne .LBB196_26
+# BB#25:
+ movl 124(%esp), %eax # 4-byte Reload
+.LBB196_26:
+ movl %eax, 48(%edi)
+ addl $892, %esp # imm = 0x37C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end196:
+ .size mcl_fp_montRed13Lbmi2, .Lfunc_end196-mcl_fp_montRed13Lbmi2
+
+ .globl mcl_fp_addPre13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre13Lbmi2,@function
+mcl_fp_addPre13Lbmi2: # @mcl_fp_addPre13Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl 40(%eax), %edi
+ movl %esi, 32(%ebx)
+ movl 40(%ecx), %esi
+ adcl %edi, %esi
+ movl 44(%eax), %edi
+ movl %edx, 36(%ebx)
+ movl 44(%ecx), %edx
+ adcl %edi, %edx
+ movl %esi, 40(%ebx)
+ movl %edx, 44(%ebx)
+ movl 48(%eax), %eax
+ movl 48(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 48(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end197:
+ .size mcl_fp_addPre13Lbmi2, .Lfunc_end197-mcl_fp_addPre13Lbmi2
+
+ .globl mcl_fp_subPre13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre13Lbmi2,@function
+mcl_fp_subPre13Lbmi2: # @mcl_fp_subPre13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %edi, 32(%ebp)
+ movl 40(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 44(%edx), %ebx
+ movl %esi, 36(%ebp)
+ movl 44(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edi, 40(%ebp)
+ movl %esi, 44(%ebp)
+ movl 48(%edx), %edx
+ movl 48(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 48(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end198:
+ .size mcl_fp_subPre13Lbmi2, .Lfunc_end198-mcl_fp_subPre13Lbmi2
+
+ .globl mcl_fp_shr1_13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_13Lbmi2,@function
+mcl_fp_shr1_13Lbmi2: # @mcl_fp_shr1_13Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %eax
+ shrdl $1, %eax, %esi
+ movl %esi, 44(%ecx)
+ shrl %eax
+ movl %eax, 48(%ecx)
+ popl %esi
+ retl
+.Lfunc_end199:
+ .size mcl_fp_shr1_13Lbmi2, .Lfunc_end199-mcl_fp_shr1_13Lbmi2
+
+ .globl mcl_fp_add13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add13Lbmi2,@function
+mcl_fp_add13Lbmi2: # @mcl_fp_add13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 68(%esp), %ebp
+ movl (%ebp), %ecx
+ movl 4(%ebp), %eax
+ movl 64(%esp), %ebx
+ addl (%ebx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 4(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 8(%ebp), %eax
+ adcl 8(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %eax
+ adcl 12(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 16(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ adcl 20(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 24(%ebx), %eax
+ adcl 24(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 28(%ebx), %eax
+ adcl 28(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 32(%ebx), %eax
+ adcl 32(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 36(%ebx), %ecx
+ adcl 36(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 40(%ebx), %edi
+ adcl 40(%ebp), %edi
+ movl 44(%ebx), %edx
+ adcl 44(%ebp), %edx
+ movl 48(%ebx), %esi
+ adcl 48(%ebp), %esi
+ movl 60(%esp), %ebp
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, (%ebp)
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ebp)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebp)
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebp)
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebp)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebp)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%ebp)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebp)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 32(%ebp)
+ movl %ecx, 36(%ebp)
+ movl %edi, 40(%ebp)
+ movl %edx, 44(%ebp)
+ movl %esi, 48(%ebp)
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 72(%esp), %ecx
+ subl (%ecx), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ sbbl 4(%ecx), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ebx # 4-byte Reload
+ sbbl 8(%ecx), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %ebx # 4-byte Reload
+ sbbl 12(%ecx), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %ebx # 4-byte Reload
+ sbbl 16(%ecx), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %ebx # 4-byte Reload
+ sbbl 20(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %ebx # 4-byte Reload
+ sbbl 24(%ecx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ecx), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebx # 4-byte Reload
+ sbbl 32(%ecx), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl (%esp), %ebx # 4-byte Reload
+ sbbl 36(%ecx), %ebx
+ sbbl 40(%ecx), %edi
+ sbbl 44(%ecx), %edx
+ sbbl 48(%ecx), %esi
+ sbbl $0, %eax
+ testb $1, %al
+ jne .LBB200_2
+# BB#1: # %nocarry
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ebp)
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ebp)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebp)
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebp)
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebp)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebp)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%ebp)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebp)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 32(%ebp)
+ movl %ebx, 36(%ebp)
+ movl %edi, 40(%ebp)
+ movl %edx, 44(%ebp)
+ movl %esi, 48(%ebp)
+.LBB200_2: # %carry
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end200:
+ .size mcl_fp_add13Lbmi2, .Lfunc_end200-mcl_fp_add13Lbmi2
+
+ .globl mcl_fp_addNF13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF13Lbmi2,@function
+mcl_fp_addNF13Lbmi2: # @mcl_fp_addNF13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 128(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ movl 124(%esp), %edx
+ addl (%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 4(%edx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 48(%esi), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 36(%esi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 32(%esi), %ebp
+ movl 28(%esi), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ movl 20(%esi), %ebx
+ movl 16(%esi), %edi
+ movl 12(%esi), %ecx
+ movl 8(%esi), %esi
+ adcl 8(%edx), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 12(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 16(%edx), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 20(%edx), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ adcl 24(%edx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 28(%edx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 32(%edx), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 36(%edx), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 40(%edx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%edx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 48(%edx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 132(%esp), %edx
+ movl 64(%esp), %eax # 4-byte Reload
+ subl (%edx), %eax
+ movl 68(%esp), %ebp # 4-byte Reload
+ sbbl 4(%edx), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ sbbl 8(%edx), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ sbbl 12(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 16(%edx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ sbbl 20(%edx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ sbbl 24(%edx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 32(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ movl %esi, %ecx
+ movl %esi, %ebp
+ sbbl 36(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ movl %esi, %ecx
+ movl %esi, %edi
+ sbbl 40(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 48(%edx), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ movl 64(%esp), %edx # 4-byte Reload
+ js .LBB201_2
+# BB#1:
+ movl %eax, %edx
+.LBB201_2:
+ movl 120(%esp), %esi
+ movl %edx, (%esi)
+ movl 68(%esp), %edx # 4-byte Reload
+ js .LBB201_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+.LBB201_4:
+ movl %edx, 4(%esi)
+ movl %edi, %edx
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB201_6
+# BB#5:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB201_6:
+ movl %eax, 8(%esi)
+ movl %ebp, %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB201_8
+# BB#7:
+ movl 8(%esp), %ebx # 4-byte Reload
+.LBB201_8:
+ movl %ebx, 12(%esi)
+ movl 96(%esp), %ebp # 4-byte Reload
+ movl 56(%esp), %ecx # 4-byte Reload
+ js .LBB201_10
+# BB#9:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB201_10:
+ movl %ecx, 16(%esi)
+ movl 92(%esp), %ecx # 4-byte Reload
+ js .LBB201_12
+# BB#11:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB201_12:
+ movl %eax, 20(%esi)
+ movl 72(%esp), %ebx # 4-byte Reload
+ js .LBB201_14
+# BB#13:
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+.LBB201_14:
+ movl 76(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%esi)
+ js .LBB201_16
+# BB#15:
+ movl 24(%esp), %ebp # 4-byte Reload
+.LBB201_16:
+ movl %ebp, 28(%esi)
+ js .LBB201_18
+# BB#17:
+ movl 28(%esp), %ebx # 4-byte Reload
+.LBB201_18:
+ movl %ebx, 32(%esi)
+ js .LBB201_20
+# BB#19:
+ movl 32(%esp), %edi # 4-byte Reload
+.LBB201_20:
+ movl %edi, 36(%esi)
+ js .LBB201_22
+# BB#21:
+ movl 36(%esp), %edx # 4-byte Reload
+.LBB201_22:
+ movl %edx, 40(%esi)
+ js .LBB201_24
+# BB#23:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB201_24:
+ movl %ecx, 44(%esi)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB201_26
+# BB#25:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB201_26:
+ movl %eax, 48(%esi)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end201:
+ .size mcl_fp_addNF13Lbmi2, .Lfunc_end201-mcl_fp_addNF13Lbmi2
+
+ .globl mcl_fp_sub13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub13Lbmi2,@function
+mcl_fp_sub13Lbmi2: # @mcl_fp_sub13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl 68(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 72(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 32(%esi), %edx
+ sbbl 32(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 36(%esi), %ecx
+ sbbl 36(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ sbbl 40(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 44(%esi), %ebp
+ sbbl 44(%edi), %ebp
+ movl 48(%esi), %esi
+ sbbl 48(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 64(%esp), %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl %edx, 32(%ebx)
+ movl %ecx, 36(%ebx)
+ movl %eax, 40(%ebx)
+ movl %ebp, 44(%ebx)
+ movl %esi, 48(%ebx)
+ je .LBB202_2
+# BB#1: # %carry
+ movl %esi, %edi
+ movl 76(%esp), %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl %ecx, 40(%ebx)
+ movl 44(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 44(%ebx)
+ movl 48(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 48(%ebx)
+.LBB202_2: # %nocarry
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end202:
+ .size mcl_fp_sub13Lbmi2, .Lfunc_end202-mcl_fp_sub13Lbmi2
+
+ .globl mcl_fp_subNF13Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF13Lbmi2,@function
+mcl_fp_subNF13Lbmi2: # @mcl_fp_subNF13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $80, %esp
+ movl 104(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ movl 108(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 32(%ecx), %ebp
+ movl 28(%ecx), %ebx
+ movl 24(%ecx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 24(%edi), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ sbbl 32(%edi), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ sbbl 48(%edi), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sarl $31, %esi
+ movl %esi, %ecx
+ shldl $1, %edx, %ecx
+ movl 112(%esp), %edi
+ movl 4(%edi), %eax
+ andl %ecx, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ andl (%edi), %ecx
+ movl 48(%edi), %eax
+ andl %esi, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 44(%edi), %eax
+ andl %esi, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 40(%edi), %eax
+ andl %esi, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 36(%edi), %eax
+ andl %esi, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 32(%edi), %eax
+ andl %esi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 28(%edi), %eax
+ andl %esi, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 24(%edi), %ebp
+ andl %esi, %ebp
+ movl 20(%edi), %ebx
+ andl %esi, %ebx
+ movl 16(%edi), %edx
+ andl %esi, %edx
+ rorxl $31, %esi, %eax
+ andl 12(%edi), %esi
+ andl 8(%edi), %eax
+ addl 48(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 100(%esp), %edi
+ movl %ecx, (%edi)
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%edi)
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 8(%edi)
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 12(%edi)
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, 16(%edi)
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %ebx, 20(%edi)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 24(%edi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%edi)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%edi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%edi)
+ movl %eax, 44(%edi)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%edi)
+ addl $80, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end203:
+ .size mcl_fp_subNF13Lbmi2, .Lfunc_end203-mcl_fp_subNF13Lbmi2
+
+ .globl mcl_fpDbl_add13Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add13Lbmi2,@function
+mcl_fpDbl_add13Lbmi2: # @mcl_fpDbl_add13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $96, %esp
+ movl 124(%esp), %ecx
+ movl 120(%esp), %esi
+ movl 12(%esi), %edi
+ movl 16(%esi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%esi), %ebp
+ movl 116(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%esi), %ebp
+ adcl 8(%esi), %ebx
+ adcl 12(%ecx), %edi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 60(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %edi, 12(%eax)
+ movl 20(%esi), %edi
+ adcl %ebx, %edi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%esi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %edi, 20(%eax)
+ movl 28(%esi), %edi
+ adcl %ebx, %edi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%esi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %edi, 28(%eax)
+ movl 36(%esi), %edi
+ adcl %ebx, %edi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%esi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %edi, 36(%eax)
+ movl 44(%esi), %edi
+ adcl %ebx, %edi
+ movl 48(%ecx), %ebx
+ movl %edx, 40(%eax)
+ movl 48(%esi), %edx
+ adcl %ebx, %edx
+ movl 52(%ecx), %ebx
+ movl %edi, 44(%eax)
+ movl 52(%esi), %edi
+ adcl %ebx, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 56(%ecx), %edi
+ movl %edx, 48(%eax)
+ movl 56(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 64(%ecx), %edx
+ movl 64(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 68(%ecx), %edx
+ movl 68(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 72(%ecx), %edx
+ movl 72(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%ecx), %edx
+ movl 76(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%ecx), %edx
+ movl 80(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%ecx), %edx
+ movl 84(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%ecx), %edx
+ movl 88(%esi), %edi
+ adcl %edx, %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 92(%ecx), %edx
+ movl 92(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 96(%ecx), %edx
+ movl 96(%esi), %ebx
+ adcl %edx, %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 100(%ecx), %ecx
+ movl 100(%esi), %esi
+ adcl %ecx, %esi
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 128(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ subl (%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ sbbl 36(%ebp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ sbbl 40(%ebp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl %esi, %ebx
+ sbbl 44(%ebp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %ecx
+ sbbl 48(%ebp), %ecx
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB204_2
+# BB#1:
+ movl %ecx, %ebx
+.LBB204_2:
+ testb %dl, %dl
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB204_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB204_4:
+ movl 116(%esp), %eax
+ movl %ecx, 52(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl 88(%esp), %ecx # 4-byte Reload
+ movl %ecx, 64(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ movl %ecx, 68(%eax)
+ movl %ebp, 72(%eax)
+ movl %edi, 76(%eax)
+ movl %esi, 80(%eax)
+ movl %edx, 84(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl 52(%esp), %edx # 4-byte Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ jne .LBB204_6
+# BB#5:
+ movl 36(%esp), %esi # 4-byte Reload
+.LBB204_6:
+ movl %esi, 88(%eax)
+ jne .LBB204_8
+# BB#7:
+ movl 40(%esp), %edx # 4-byte Reload
+.LBB204_8:
+ movl %edx, 92(%eax)
+ jne .LBB204_10
+# BB#9:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB204_10:
+ movl %ecx, 96(%eax)
+ movl %ebx, 100(%eax)
+ addl $96, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end204:
+ .size mcl_fpDbl_add13Lbmi2, .Lfunc_end204-mcl_fpDbl_add13Lbmi2
+
+ .globl mcl_fpDbl_sub13Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub13Lbmi2,@function
+mcl_fpDbl_sub13Lbmi2: # @mcl_fpDbl_sub13Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $84, %esp
+ movl 108(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %edx
+ movl 112(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %edx
+ movl 8(%edi), %esi
+ sbbl 8(%ebx), %esi
+ movl 104(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edi), %eax
+ sbbl 12(%ebx), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%edi), %edx
+ sbbl 16(%ebx), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebx), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%edi), %eax
+ sbbl %esi, %eax
+ movl 24(%ebx), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%edi), %edx
+ sbbl %esi, %edx
+ movl 28(%ebx), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%edi), %eax
+ sbbl %esi, %eax
+ movl 32(%ebx), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%edi), %edx
+ sbbl %esi, %edx
+ movl 36(%ebx), %esi
+ movl %eax, 28(%ecx)
+ movl 36(%edi), %eax
+ sbbl %esi, %eax
+ movl 40(%ebx), %esi
+ movl %edx, 32(%ecx)
+ movl 40(%edi), %edx
+ sbbl %esi, %edx
+ movl 44(%ebx), %esi
+ movl %eax, 36(%ecx)
+ movl 44(%edi), %eax
+ sbbl %esi, %eax
+ movl 48(%ebx), %esi
+ movl %edx, 40(%ecx)
+ movl 48(%edi), %edx
+ sbbl %esi, %edx
+ movl 52(%ebx), %esi
+ movl %eax, 44(%ecx)
+ movl 52(%edi), %eax
+ sbbl %esi, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 56(%ebx), %eax
+ movl %edx, 48(%ecx)
+ movl 56(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 60(%ebx), %eax
+ movl 60(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 64(%ebx), %eax
+ movl 64(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 68(%ebx), %eax
+ movl 68(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 72(%ebx), %eax
+ movl 72(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 76(%ebx), %eax
+ movl 76(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 80(%ebx), %eax
+ movl 80(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 84(%ebx), %eax
+ movl 84(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 88(%ebx), %eax
+ movl 88(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 92(%ebx), %eax
+ movl 92(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 96(%ebx), %eax
+ movl 96(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 100(%ebx), %eax
+ movl 100(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 116(%esp), %edi
+ jne .LBB205_1
+# BB#2:
+ movl $0, 44(%esp) # 4-byte Folded Spill
+ jmp .LBB205_3
+.LBB205_1:
+ movl 48(%edi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+.LBB205_3:
+ testb %al, %al
+ jne .LBB205_4
+# BB#5:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ movl $0, %ebx
+ jmp .LBB205_6
+.LBB205_4:
+ movl (%edi), %ebx
+ movl 4(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB205_6:
+ jne .LBB205_7
+# BB#8:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ jmp .LBB205_9
+.LBB205_7:
+ movl 44(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB205_9:
+ jne .LBB205_10
+# BB#11:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB205_12
+.LBB205_10:
+ movl 40(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB205_12:
+ jne .LBB205_13
+# BB#14:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB205_15
+.LBB205_13:
+ movl 36(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB205_15:
+ jne .LBB205_16
+# BB#17:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB205_18
+.LBB205_16:
+ movl 32(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB205_18:
+ jne .LBB205_19
+# BB#20:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB205_21
+.LBB205_19:
+ movl 28(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB205_21:
+ jne .LBB205_22
+# BB#23:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB205_24
+.LBB205_22:
+ movl 24(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB205_24:
+ jne .LBB205_25
+# BB#26:
+ movl $0, %eax
+ jmp .LBB205_27
+.LBB205_25:
+ movl 20(%edi), %eax
+.LBB205_27:
+ jne .LBB205_28
+# BB#29:
+ movl $0, %edx
+ jmp .LBB205_30
+.LBB205_28:
+ movl 16(%edi), %edx
+.LBB205_30:
+ jne .LBB205_31
+# BB#32:
+ movl $0, %esi
+ jmp .LBB205_33
+.LBB205_31:
+ movl 12(%edi), %esi
+.LBB205_33:
+ jne .LBB205_34
+# BB#35:
+ xorl %edi, %edi
+ jmp .LBB205_36
+.LBB205_34:
+ movl 8(%edi), %edi
+.LBB205_36:
+ addl 36(%esp), %ebx # 4-byte Folded Reload
+ movl 16(%esp), %ebp # 4-byte Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %ebp, 56(%ecx)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 60(%ecx)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 64(%ecx)
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 68(%ecx)
+ movl (%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 76(%ecx)
+ movl 8(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 80(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 84(%ecx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 88(%ecx)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl %eax, 96(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%ecx)
+ addl $84, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end205:
+ .size mcl_fpDbl_sub13Lbmi2, .Lfunc_end205-mcl_fpDbl_sub13Lbmi2
+
+ .align 16, 0x90
+ .type .LmulPv448x32,@function
+.LmulPv448x32: # @mulPv448x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $48, %esp
+ movl %edx, %eax
+ movl 68(%esp), %ebx
+ movl %ebx, %edx
+ mulxl 4(%eax), %edi, %esi
+ movl %ebx, %edx
+ mulxl (%eax), %ebp, %edx
+ movl %ebp, 44(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 8(%eax), %edx, %edi
+ adcl %esi, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 12(%eax), %edx, %esi
+ adcl %edi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 16(%eax), %edx, %edi
+ adcl %esi, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 20(%eax), %edx, %esi
+ adcl %edi, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 24(%eax), %edx, %edi
+ adcl %esi, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 28(%eax), %edx, %esi
+ adcl %edi, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 32(%eax), %edx, %edi
+ adcl %esi, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 36(%eax), %edx, %esi
+ adcl %edi, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %ebx, %edx
+ mulxl 40(%eax), %edi, %ebp
+ adcl %esi, %edi
+ movl %ebx, %edx
+ mulxl 44(%eax), %esi, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl %ebp, %esi
+ movl %ebx, %edx
+ mulxl 48(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 36(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl 32(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%ecx)
+ movl 28(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%ecx)
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%ecx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%ecx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%ecx)
+ movl %edi, 40(%ecx)
+ movl %esi, 44(%ecx)
+ movl %edx, 48(%ecx)
+ movl %ebx, %edx
+ mulxl 52(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ adcl $0, %edx
+ movl %edx, 56(%ecx)
+ movl %ecx, %eax
+ addl $48, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end206:
+ .size .LmulPv448x32, .Lfunc_end206-.LmulPv448x32
+
+ .globl mcl_fp_mulUnitPre14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre14Lbmi2,@function
+mcl_fp_mulUnitPre14Lbmi2: # @mcl_fp_mulUnitPre14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+ calll .L207$pb
+.L207$pb:
+ popl %ebx
+.Ltmp38:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp38-.L207$pb), %ebx
+ movl 136(%esp), %eax
+ movl %eax, (%esp)
+ leal 48(%esp), %ecx
+ movl 132(%esp), %edx
+ calll .LmulPv448x32
+ movl 104(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp
+ movl 64(%esp), %ebx
+ movl 60(%esp), %edi
+ movl 56(%esp), %esi
+ movl 48(%esp), %edx
+ movl 52(%esp), %ecx
+ movl 128(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end207:
+ .size mcl_fp_mulUnitPre14Lbmi2, .Lfunc_end207-mcl_fp_mulUnitPre14Lbmi2
+
+ .globl mcl_fpDbl_mulPre14Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre14Lbmi2,@function
+mcl_fpDbl_mulPre14Lbmi2: # @mcl_fpDbl_mulPre14Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $268, %esp # imm = 0x10C
+ calll .L208$pb
+.L208$pb:
+ popl %ebx
+.Ltmp39:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp39-.L208$pb), %ebx
+ movl %ebx, -192(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl %esi, 8(%esp)
+ movl 12(%ebp), %edi
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre7Lbmi2@PLT
+ leal 28(%esi), %eax
+ movl %eax, 8(%esp)
+ leal 28(%edi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 56(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre7Lbmi2@PLT
+ movl 44(%edi), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl 40(%edi), %eax
+ movl 36(%edi), %edx
+ movl (%edi), %edi
+ movl 12(%ebp), %ecx
+ movl 4(%ecx), %ecx
+ movl 12(%ebp), %ebx
+ addl 28(%ebx), %edi
+ movl %edi, -180(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ adcl 32(%edi), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ adcl 8(%edi), %edx
+ movl %edx, -212(%ebp) # 4-byte Spill
+ adcl 12(%edi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl 16(%edi), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl %eax, %ebx
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ movl (%esi), %eax
+ addl 28(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ movl 4(%esi), %eax
+ adcl 32(%esi), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl 36(%esi), %eax
+ adcl 8(%esi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl 40(%esi), %eax
+ adcl 12(%esi), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl 44(%esi), %eax
+ adcl 16(%esi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ movl 48(%esi), %ecx
+ adcl 20(%esi), %ecx
+ movl 52(%esi), %eax
+ adcl 24(%esi), %eax
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %esi
+ popl %eax
+ movl %esi, -220(%ebp) # 4-byte Spill
+ movl %ebx, %esi
+ movl %edx, -184(%ebp) # 4-byte Spill
+ movl -180(%ebp), %edx # 4-byte Reload
+ movl %edx, -188(%ebp) # 4-byte Spill
+ jb .LBB208_2
+# BB#1:
+ xorl %esi, %esi
+ movl $0, -184(%ebp) # 4-byte Folded Spill
+ movl $0, -188(%ebp) # 4-byte Folded Spill
+.LBB208_2:
+ movl %esi, -204(%ebp) # 4-byte Spill
+ movl 52(%edi), %esi
+ movl 48(%edi), %ebx
+ movl -128(%ebp), %edx # 4-byte Reload
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 20(%edi), %ebx
+ movl %ebx, -160(%ebp) # 4-byte Spill
+ adcl 24(%edi), %esi
+ movl %esi, -208(%ebp) # 4-byte Spill
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ movl -176(%ebp), %esi # 4-byte Reload
+ movl %esi, -128(%ebp) # 4-byte Spill
+ movl -172(%ebp), %esi # 4-byte Reload
+ movl %esi, -132(%ebp) # 4-byte Spill
+ movl -168(%ebp), %esi # 4-byte Reload
+ movl %esi, -136(%ebp) # 4-byte Spill
+ movl -164(%ebp), %esi # 4-byte Reload
+ movl %esi, -140(%ebp) # 4-byte Spill
+ movl -216(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -144(%ebp) # 4-byte Spill
+ jb .LBB208_4
+# BB#3:
+ movl $0, -148(%ebp) # 4-byte Folded Spill
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+ movl $0, -128(%ebp) # 4-byte Folded Spill
+ movl $0, -132(%ebp) # 4-byte Folded Spill
+ movl $0, -136(%ebp) # 4-byte Folded Spill
+ movl $0, -140(%ebp) # 4-byte Folded Spill
+ movl $0, -144(%ebp) # 4-byte Folded Spill
+.LBB208_4:
+ movl -180(%ebp), %edx # 4-byte Reload
+ movl %edx, -96(%ebp)
+ movl -200(%ebp), %esi # 4-byte Reload
+ movl %esi, -92(%ebp)
+ movl -212(%ebp), %edx # 4-byte Reload
+ movl %edx, -88(%ebp)
+ movl -196(%ebp), %edi # 4-byte Reload
+ movl %edi, -84(%ebp)
+ movl -156(%ebp), %edx # 4-byte Reload
+ movl %edx, -80(%ebp)
+ movl %ebx, -124(%ebp)
+ movl -164(%ebp), %edx # 4-byte Reload
+ movl %edx, -120(%ebp)
+ movl -168(%ebp), %edx # 4-byte Reload
+ movl %edx, -116(%ebp)
+ movl -172(%ebp), %edx # 4-byte Reload
+ movl %edx, -112(%ebp)
+ movl -176(%ebp), %edx # 4-byte Reload
+ movl %edx, -108(%ebp)
+ movl %ecx, -104(%ebp)
+ movl %edi, %ebx
+ movl %esi, %edi
+ movl %eax, -100(%ebp)
+ sbbl %edx, %edx
+ movl -160(%ebp), %eax # 4-byte Reload
+ movl %eax, -76(%ebp)
+ movl -208(%ebp), %esi # 4-byte Reload
+ movl %esi, -72(%ebp)
+ movl -220(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB208_6
+# BB#5:
+ movl $0, %esi
+ movl $0, %eax
+ movl $0, %ebx
+ movl $0, %edi
+.LBB208_6:
+ movl %eax, -160(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+ leal -124(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -96(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -68(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl -188(%ebp), %eax # 4-byte Reload
+ addl %eax, -144(%ebp) # 4-byte Folded Spill
+ adcl %edi, -140(%ebp) # 4-byte Folded Spill
+ movl -184(%ebp), %eax # 4-byte Reload
+ adcl %eax, -136(%ebp) # 4-byte Folded Spill
+ adcl %ebx, -132(%ebp) # 4-byte Folded Spill
+ movl -204(%ebp), %eax # 4-byte Reload
+ adcl %eax, -128(%ebp) # 4-byte Folded Spill
+ movl -152(%ebp), %edi # 4-byte Reload
+ adcl -160(%ebp), %edi # 4-byte Folded Reload
+ adcl %esi, -148(%ebp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -156(%ebp) # 4-byte Spill
+ movl -192(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre7Lbmi2@PLT
+ movl -144(%ebp), %eax # 4-byte Reload
+ addl -40(%ebp), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ movl -140(%ebp), %eax # 4-byte Reload
+ adcl -36(%ebp), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ movl -136(%ebp), %eax # 4-byte Reload
+ adcl -32(%ebp), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ movl -132(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ adcl -20(%ebp), %edi
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ adcl %esi, -156(%ebp) # 4-byte Folded Spill
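+# Karatsuba fix-up (sketch of what the generated code below appears to do):
+# subtract the low and high partial products from the (x_lo+x_hi)*(y_lo+y_hi)
+# product to recover the cross term, add it into the middle limbs of the
+# 28-limb result, and ripple the final carry to the top limbs.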
+ movl -68(%ebp), %eax
+ movl 8(%ebp), %esi
+ subl (%esi), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl -64(%ebp), %ecx
+ sbbl 4(%esi), %ecx
+ movl -60(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl -56(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl -52(%ebp), %ebx
+ sbbl 16(%esi), %ebx
+ movl -48(%ebp), %eax
+ sbbl 20(%esi), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl -44(%ebp), %eax
+ sbbl 24(%esi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl 28(%esi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 36(%esi), %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -188(%ebp) # 4-byte Spill
+ sbbl %eax, -132(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %eax
+ movl %eax, -192(%ebp) # 4-byte Spill
+ sbbl %eax, -128(%ebp) # 4-byte Folded Spill
+ movl 48(%esi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl %edi, -152(%ebp) # 4-byte Spill
+ movl 52(%esi), %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ movl -148(%ebp), %edi # 4-byte Reload
+ sbbl %eax, %edi
+ sbbl $0, -156(%ebp) # 4-byte Folded Spill
+ movl 56(%esi), %eax
+ movl %eax, -228(%ebp) # 4-byte Spill
+ subl %eax, -172(%ebp) # 4-byte Folded Spill
+ movl 60(%esi), %eax
+ movl %eax, -232(%ebp) # 4-byte Spill
+ sbbl %eax, %ecx
+ movl 64(%esi), %eax
+ movl %eax, -236(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -240(%ebp) # 4-byte Spill
+ sbbl %eax, %edx
+ movl 72(%esi), %eax
+ movl %eax, -244(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 76(%esi), %eax
+ movl %eax, -248(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 80(%esi), %eax
+ movl %eax, -252(%ebp) # 4-byte Spill
+ sbbl %eax, -168(%ebp) # 4-byte Folded Spill
+ movl 84(%esi), %eax
+ movl %eax, -256(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 88(%esi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 92(%esi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 96(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ sbbl %eax, -132(%ebp) # 4-byte Folded Spill
+ movl 100(%esi), %eax
+ movl %eax, -220(%ebp) # 4-byte Spill
+ sbbl %eax, -128(%ebp) # 4-byte Folded Spill
+ movl 104(%esi), %eax
+ movl %eax, -224(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl 108(%esi), %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl %edi, -148(%ebp) # 4-byte Spill
+ movl -156(%ebp), %edi # 4-byte Reload
+ sbbl $0, %edi
+ movl -172(%ebp), %eax # 4-byte Reload
+ addl -176(%ebp), %eax # 4-byte Folded Reload
+ adcl -180(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -184(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%esi)
+ adcl -188(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ adcl -192(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 40(%esi)
+ movl -164(%ebp), %eax # 4-byte Reload
+ adcl -196(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 44(%esi)
+ movl -168(%ebp), %ecx # 4-byte Reload
+ adcl -200(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -228(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl -140(%ebp), %ecx # 4-byte Reload
+ adcl -232(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 56(%esi)
+ movl -136(%ebp), %eax # 4-byte Reload
+ adcl -236(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 60(%esi)
+ movl -132(%ebp), %ecx # 4-byte Reload
+ adcl -240(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 64(%esi)
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -244(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 68(%esi)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -248(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 72(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -252(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 76(%esi)
+ adcl -256(%ebp), %edi # 4-byte Folded Reload
+ movl %eax, 80(%esi)
+ movl %edi, 84(%esi)
+ movl -208(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 88(%esi)
+ movl -212(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 92(%esi)
+ movl -216(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 96(%esi)
+ movl -220(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 100(%esi)
+ movl -224(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 104(%esi)
+ movl -204(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 108(%esi)
+ addl $268, %esp # imm = 0x10C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end208:
+ .size mcl_fpDbl_mulPre14Lbmi2, .Lfunc_end208-mcl_fpDbl_mulPre14Lbmi2
+
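+# mcl_fpDbl_sqrPre14Lbmi2: full (unreduced) squaring of a 14-limb (448-bit)
+# operand. The operand is split into 7-limb halves; the low and high halves
+# are squared with mcl_fpDbl_mulPre7Lbmi2, a third mulPre7 call handles the
+# (lo+hi) sum, and Karatsuba-style fix-up code combines the three products.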
+ .globl mcl_fpDbl_sqrPre14Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre14Lbmi2,@function
+mcl_fpDbl_sqrPre14Lbmi2: # @mcl_fpDbl_sqrPre14Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $268, %esp # imm = 0x10C
+ calll .L209$pb
+.L209$pb:
+ popl %ebx
+.Ltmp40:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp40-.L209$pb), %ebx
+ movl %ebx, -172(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre7Lbmi2@PLT
+ leal 28(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 56(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre7Lbmi2@PLT
+ movl 48(%edi), %eax
+ movl 44(%edi), %ecx
+ movl 36(%edi), %edx
+ movl (%edi), %esi
+ movl 4(%edi), %ebx
+ addl 28(%edi), %esi
+ adcl 32(%edi), %ebx
+ movl %ebx, -164(%ebp) # 4-byte Spill
+ adcl 8(%edi), %edx
+ movl %edx, -160(%ebp) # 4-byte Spill
+ movl 40(%edi), %edx
+ adcl 12(%edi), %edx
+ adcl 16(%edi), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ adcl 20(%edi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ movl 52(%edi), %ecx
+ adcl 24(%edi), %ecx
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edi
+ seto %al
+ lahf
+ movl %eax, %eax
+ sbbl %ebx, %ebx
+ movl %ebx, -128(%ebp) # 4-byte Spill
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_1
+# BB#2:
+ movl %esi, -168(%ebp) # 4-byte Spill
+ movl $0, -132(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_3
+.LBB209_1:
+ leal (%esi,%esi), %eax
+ movl %esi, -168(%ebp) # 4-byte Spill
+ movl %eax, -132(%ebp) # 4-byte Spill
+.LBB209_3:
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ movl -180(%ebp), %ebx # 4-byte Reload
+ jb .LBB209_4
+# BB#5:
+ movl $0, -156(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_6
+.LBB209_4:
+ movl -164(%ebp), %eax # 4-byte Reload
+ movl -168(%ebp), %esi # 4-byte Reload
+ shldl $1, %esi, %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+.LBB209_6:
+ movl -176(%ebp), %edi # 4-byte Reload
+ movl -136(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_7
+# BB#8:
+ movl $0, -136(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_9
+.LBB209_7:
+ movl -160(%ebp), %eax # 4-byte Reload
+ movl -164(%ebp), %esi # 4-byte Reload
+ shldl $1, %esi, %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+.LBB209_9:
+ movl %ebx, %esi
+ movl -140(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_10
+# BB#11:
+ movl $0, -140(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_12
+.LBB209_10:
+ movl %edx, %eax
+ movl -160(%ebp), %ebx # 4-byte Reload
+ shldl $1, %ebx, %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+.LBB209_12:
+ movl -144(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_13
+# BB#14:
+ movl $0, -144(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_15
+.LBB209_13:
+ movl %esi, %eax
+ shldl $1, %edx, %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+.LBB209_15:
+ movl -148(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_16
+# BB#17:
+ movl $0, -148(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_18
+.LBB209_16:
+ movl %edi, %eax
+ shldl $1, %esi, %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+.LBB209_18:
+ movl -152(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_19
+# BB#20:
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_21
+.LBB209_19:
+ movl %ecx, %eax
+ shldl $1, %edi, %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+.LBB209_21:
+ movl -168(%ebp), %eax # 4-byte Reload
+ movl %eax, -96(%ebp)
+ movl %eax, -124(%ebp)
+ movl -164(%ebp), %eax # 4-byte Reload
+ movl %eax, -92(%ebp)
+ movl %eax, -120(%ebp)
+ movl -160(%ebp), %eax # 4-byte Reload
+ movl %eax, -88(%ebp)
+ movl %eax, -116(%ebp)
+ movl %edx, -84(%ebp)
+ movl %edx, -112(%ebp)
+ movl %esi, -80(%ebp)
+ movl %esi, -108(%ebp)
+ movl %edi, -76(%ebp)
+ movl %edi, -104(%ebp)
+ movl %ecx, -72(%ebp)
+ movl %ecx, -100(%ebp)
+ movl -184(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_22
+# BB#23:
+ xorl %edi, %edi
+ jmp .LBB209_24
+.LBB209_22:
+ shrl $31, %ecx
+ movl %ecx, %edi
+.LBB209_24:
+ leal -68(%ebp), %eax
+ movl %eax, (%esp)
+ leal -96(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -124(%ebp), %eax
+ movl %eax, 8(%esp)
+ movl -128(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -172(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre7Lbmi2@PLT
+ movl -132(%ebp), %eax # 4-byte Reload
+ addl -40(%ebp), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl -36(%ebp), %eax
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -32(%ebp), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ movl -140(%ebp), %ecx # 4-byte Reload
+ adcl -28(%ebp), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl -24(%ebp), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -148(%ebp), %ecx # 4-byte Reload
+ adcl -20(%ebp), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -16(%ebp), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ adcl %edi, %esi
+ movl %esi, -128(%ebp) # 4-byte Spill
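+# Subtract the low and high half squares from the (lo+hi) square to obtain
+# twice the cross product, then fold it into the middle of the 28-limb result.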
+ movl -68(%ebp), %ecx
+ movl 8(%ebp), %esi
+ subl (%esi), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ movl -64(%ebp), %edi
+ sbbl 4(%esi), %edi
+ movl -60(%ebp), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, -160(%ebp) # 4-byte Spill
+ movl -56(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, -168(%ebp) # 4-byte Spill
+ movl -52(%ebp), %ebx
+ sbbl 16(%esi), %ebx
+ movl -48(%ebp), %ecx
+ sbbl 20(%esi), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl -44(%ebp), %edx
+ sbbl 24(%esi), %edx
+ movl %edx, -164(%ebp) # 4-byte Spill
+ movl 28(%esi), %edx
+ movl %edx, -176(%ebp) # 4-byte Spill
+ sbbl %edx, -132(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl 36(%esi), %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -188(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %eax
+ movl %eax, -192(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 48(%esi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 52(%esi), %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl -128(%ebp), %ecx # 4-byte Reload
+ sbbl $0, %ecx
+ movl 56(%esi), %eax
+ movl %eax, -228(%ebp) # 4-byte Spill
+ movl -204(%ebp), %edx # 4-byte Reload
+ subl %eax, %edx
+ movl 60(%esi), %eax
+ movl %eax, -232(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl 64(%esi), %eax
+ movl %eax, -236(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -240(%ebp) # 4-byte Spill
+ sbbl %eax, -168(%ebp) # 4-byte Folded Spill
+ movl 72(%esi), %eax
+ movl %eax, -244(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 76(%esi), %eax
+ movl %eax, -248(%ebp) # 4-byte Spill
+ sbbl %eax, -172(%ebp) # 4-byte Folded Spill
+ movl 80(%esi), %eax
+ movl %eax, -252(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 84(%esi), %eax
+ movl %eax, -256(%ebp) # 4-byte Spill
+ sbbl %eax, -132(%ebp) # 4-byte Folded Spill
+ movl 88(%esi), %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ sbbl %eax, -156(%ebp) # 4-byte Folded Spill
+ movl 92(%esi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 96(%esi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 100(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 104(%esi), %eax
+ movl %eax, -220(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 108(%esi), %eax
+ movl %eax, -224(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ sbbl $0, %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ movl %edx, %eax
+ addl -176(%ebp), %eax # 4-byte Folded Reload
+ adcl -180(%ebp), %edi # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -184(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 32(%esi)
+ movl -168(%ebp), %ecx # 4-byte Reload
+ adcl -188(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ adcl -192(%ebp), %ebx # 4-byte Folded Reload
+ movl %ecx, 40(%esi)
+ movl -172(%ebp), %eax # 4-byte Reload
+ adcl -196(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 44(%esi)
+ movl -164(%ebp), %ecx # 4-byte Reload
+ adcl -200(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl -132(%ebp), %eax # 4-byte Reload
+ adcl -228(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl -156(%ebp), %edx # 4-byte Reload
+ adcl -232(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 56(%esi)
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -236(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 60(%esi)
+ movl -140(%ebp), %eax # 4-byte Reload
+ adcl -240(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 64(%esi)
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl -244(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 68(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -248(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 72(%esi)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -252(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 76(%esi)
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -256(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 80(%esi)
+ movl %eax, 84(%esi)
+ movl -204(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 88(%esi)
+ movl -208(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 92(%esi)
+ movl -212(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 96(%esi)
+ movl -216(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 100(%esi)
+ movl -220(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 104(%esi)
+ movl -224(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 108(%esi)
+ addl $268, %esp # imm = 0x10C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end209:
+ .size mcl_fpDbl_sqrPre14Lbmi2, .Lfunc_end209-mcl_fpDbl_sqrPre14Lbmi2
+
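+# mcl_fp_mont14Lbmi2: word-serial Montgomery multiplication for a 14-limb
+# (448-bit) modulus. For each 32-bit word of the multiplier a partial product
+# is formed with .LmulPv448x32; the running sum is then multiplied by the
+# constant kept at 48(%esp) (loaded from 4 bytes below the modulus pointer,
+# apparently -p^-1 mod 2^32), and a second .LmulPv448x32 against the modulus
+# folds in the reduction term. A final conditional subtraction of the modulus
+# yields the reduced result.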
+ .globl mcl_fp_mont14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont14Lbmi2,@function
+mcl_fp_mont14Lbmi2: # @mcl_fp_mont14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1900, %esp # imm = 0x76C
+ calll .L210$pb
+.L210$pb:
+ popl %ebx
+.Ltmp41:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp41-.L210$pb), %ebx
+ movl 1932(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1840(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 1840(%esp), %edi
+ movl 1844(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1896(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 1892(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 1888(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 1884(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1880(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1876(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1872(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1868(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1864(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1860(%esp), %esi
+ movl 1856(%esp), %ebp
+ movl 1852(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1848(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1776(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ addl 1776(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1784(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1792(%esp), %ebp
+ adcl 1796(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1804(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1820(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 1928(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1712(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %edi
+ movl %edi, %edx
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 1712(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1716(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1720(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1724(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 1728(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1732(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1736(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1740(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1756(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1768(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1648(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl 100(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1648(%esp), %ebp
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1652(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1656(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1660(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1664(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1668(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1672(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1676(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 1680(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1684(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1688(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1692(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1696(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1700(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 1704(%esp), %esi
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 1928(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1584(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1584(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1588(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1592(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1596(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1600(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1604(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1608(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1616(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1620(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1624(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1628(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1632(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1636(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl 1640(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1520(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1520(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 1544(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1548(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 1564(%esp), %ebp
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1568(%esp), %esi
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 1572(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1576(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1456(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 1456(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1472(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1484(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1488(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1492(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1496(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ adcl 1500(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl 1504(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1512(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1392(%esp), %ecx
+ movl 1932(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ andl $1, %edi
+ movl %edi, %eax
+ addl 1392(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1396(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1400(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1404(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1408(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1412(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1416(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1420(%esp), %esi
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 1424(%esp), %ebp
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1428(%esp), %edi
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1432(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1436(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1440(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1448(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1328(%esp), %ecx
+ movl 1924(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1328(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1352(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1356(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl 1360(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1384(%esp), %edi
+ sbbl %esi, %esi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1264(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 1284(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1312(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1316(%esp), %esi
+ adcl 1320(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1200(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 68(%esp), %eax # 4-byte Reload
+ addl 1200(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1204(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1208(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1212(%esp), %edi
+ adcl 1216(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1224(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1228(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1244(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 1248(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1252(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1256(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %eax, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1136(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1136(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1144(%esp), %ebp
+ adcl 1148(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1172(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 1180(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1072(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 1072(%esp), %eax
+ adcl 1076(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1084(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1088(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1092(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1096(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1100(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1104(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1108(%esp), %ebp
+ adcl 1112(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1116(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1120(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1124(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1128(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %eax, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1008(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1008(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1020(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 1036(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1044(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1052(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 944(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 944(%esp), %eax
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 952(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 956(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 960(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 964(%esp), %esi
+ adcl 968(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 972(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 976(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 980(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 984(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 988(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 992(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 996(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1000(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %eax, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %ebp
+ addl 880(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 896(%esp), %edi
+ adcl 900(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 924(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 816(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 824(%esp), %ebp
+ adcl 828(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 856(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 872(%esp), %esi
+ sbbl %eax, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl 56(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 752(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 760(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 764(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 768(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 772(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 776(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 784(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 788(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 792(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 796(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 800(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 804(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 808(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 688(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 728(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 732(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 740(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 624(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 636(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 648(%esp), %esi
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 652(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 560(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 568(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 580(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl 584(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 592(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 496(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 508(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 520(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 528(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 540(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 432(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 440(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 444(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 452(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 472(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 368(%esp), %ebp
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 376(%esp), %esi
+ adcl 380(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 304(%esp), %ecx
+ adcl 308(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl 312(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 324(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 328(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 240(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 240(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 248(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 252(%esp), %edi
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 256(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 264(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 268(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 104(%esp), %ecx # 4-byte Reload
+ addl 176(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 184(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ adcl 188(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 192(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 200(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %ebp
+ addl 112(%esp), %esi
+ movl 100(%esp), %esi # 4-byte Reload
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 124(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl 128(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 132(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 140(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 144(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 148(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 152(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 156(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 160(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 164(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 168(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 1932(%esp), %ecx
+ subl (%ecx), %eax
+ sbbl 4(%ecx), %edx
+ sbbl 8(%ecx), %esi
+ sbbl 12(%ecx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 20(%ecx), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 24(%ecx), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 60(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ecx), %ebx
+ movl 52(%esp), %edi # 4-byte Reload
+ sbbl 32(%ecx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ sbbl 36(%ecx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 40(%ecx), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ sbbl 44(%ecx), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ sbbl 48(%ecx), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ sbbl 52(%ecx), %edi
+ movl %ebp, %ecx
+ movl %edi, 104(%esp) # 4-byte Spill
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB210_2
+# BB#1:
+ movl %ebx, 60(%esp) # 4-byte Spill
+.LBB210_2:
+ testb %cl, %cl
+ movl 108(%esp), %ebx # 4-byte Reload
+ jne .LBB210_4
+# BB#3:
+ movl %eax, %ebx
+.LBB210_4:
+ movl 1920(%esp), %eax
+ movl %ebx, (%eax)
+ movl 92(%esp), %edi # 4-byte Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB210_6
+# BB#5:
+ movl %edx, %edi
+.LBB210_6:
+ movl %edi, 4(%eax)
+ jne .LBB210_8
+# BB#7:
+ movl %esi, 100(%esp) # 4-byte Spill
+.LBB210_8:
+ movl 100(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%eax)
+ jne .LBB210_10
+# BB#9:
+ movl 16(%esp), %edx # 4-byte Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+.LBB210_10:
+ movl 84(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%eax)
+ jne .LBB210_12
+# BB#11:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB210_12:
+ movl %ecx, 16(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ jne .LBB210_14
+# BB#13:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB210_14:
+ movl %ecx, 20(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB210_16
+# BB#15:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB210_16:
+ movl %ecx, 24(%eax)
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ jne .LBB210_18
+# BB#17:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB210_18:
+ movl %ecx, 32(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB210_20
+# BB#19:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB210_20:
+ movl %ecx, 36(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB210_22
+# BB#21:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB210_22:
+ movl %ecx, 40(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ jne .LBB210_24
+# BB#23:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB210_24:
+ movl %ecx, 44(%eax)
+ movl 88(%esp), %ecx # 4-byte Reload
+ jne .LBB210_26
+# BB#25:
+ movl 48(%esp), %ecx # 4-byte Reload
+.LBB210_26:
+ movl %ecx, 48(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ jne .LBB210_28
+# BB#27:
+ movl 104(%esp), %ecx # 4-byte Reload
+.LBB210_28:
+ movl %ecx, 52(%eax)
+ addl $1900, %esp # imm = 0x76C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end210:
+ .size mcl_fp_mont14Lbmi2, .Lfunc_end210-mcl_fp_mont14Lbmi2
+
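+# mcl_fp_montNF14Lbmi2: Montgomery multiplication for a 14-limb (14 x 32-bit = 448-bit)
+# modulus, "NF" variant, BMI2 build. The four stack arguments are the result pointer, the
+# two 448-bit operands, and the modulus; the per-step Montgomery factor is taken from the
+# word at offset -4 of the modulus argument, and the final reduction selects between the
+# accumulated sum and its modulus-subtracted copy by the sign of the last borrow rather
+# than by a carry bit.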
+ .globl mcl_fp_montNF14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF14Lbmi2,@function
+mcl_fp_montNF14Lbmi2: # @mcl_fp_montNF14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1884, %esp # imm = 0x75C
+ calll .L211$pb
+.L211$pb:
+ popl %ebx
+.Ltmp42:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp42-.L211$pb), %ebx
+ movl 1916(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1824(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1824(%esp), %edi
+ movl 1828(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1880(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 1876(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1872(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1868(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1864(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1860(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1856(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1852(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1848(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1844(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1840(%esp), %esi
+ movl 1836(%esp), %ebp
+ movl 1832(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1760(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1760(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1768(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1772(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl 1776(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1784(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1804(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1808(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1816(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1696(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1752(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1696(%esp), %edx
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1700(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1704(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1708(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1712(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1716(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1720(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1724(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1728(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1732(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1736(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1740(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ adcl 1748(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %ebp
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1632(%esp), %ecx
+ movl 1916(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ addl 1632(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1636(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1640(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1644(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1656(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 1664(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1680(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1688(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1568(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1624(%esp), %eax
+ movl 88(%esp), %edx # 4-byte Reload
+ addl 1568(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1572(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1576(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1580(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1584(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1588(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1592(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 1596(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1600(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1604(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1608(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 1612(%esp), %edi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1616(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1620(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %ebp
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1504(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1504(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1512(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1544(%esp), %esi
+ adcl 1548(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1560(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1440(%esp), %ecx
+ movl 1908(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ movl 1496(%esp), %eax
+ movl 72(%esp), %edx # 4-byte Reload
+ addl 1440(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1448(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1452(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1460(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1464(%esp), %edi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1468(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1472(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1476(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1480(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1484(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1488(%esp), %esi
+ adcl 1492(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %ebp
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1376(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1376(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1400(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1424(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1312(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1368(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1312(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1316(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1320(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 1328(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1336(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1340(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1344(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1348(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1352(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1356(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 1360(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1364(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1248(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1248(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 1264(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1272(%esp), %ebp
+ adcl 1276(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1284(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1300(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1184(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1240(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1184(%esp), %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1204(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1216(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1232(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1120(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1120(%esp), %esi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 1124(%esp), %ebp
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1128(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 1156(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1056(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1112(%esp), %eax
+ movl %ebp, %ecx
+ addl 1056(%esp), %ecx
+ adcl 1060(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 1064(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1068(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1072(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1076(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1080(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1084(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 1088(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1092(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 1096(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 1100(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1104(%esp), %ebp
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 1108(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 992(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 1008(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1040(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1044(%esp), %ebp
+ adcl 1048(%esp), %esi
+ movl 1912(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 984(%esp), %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 932(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 936(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 940(%esp), %edi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 944(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 948(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 952(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 956(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 960(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 964(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 968(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 972(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 976(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 980(%esp), %esi
+ movl %esi, %ebp
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 864(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 864(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 876(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 884(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 916(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 920(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 800(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 856(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 800(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 808(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 816(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 828(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 852(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 736(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 736(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 764(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 772(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 780(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 728(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 672(%esp), %ecx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 700(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl 704(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 712(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 608(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 616(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 624(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 644(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 600(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 544(%esp), %ecx
+ adcl 548(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 556(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 568(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 576(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 480(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 488(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 496(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 504(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 532(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 472(%esp), %edx
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 416(%esp), %ecx
+ adcl 420(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 424(%esp), %edi
+ adcl 428(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 464(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 352(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 360(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 364(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 388(%esp), %edi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 288(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 344(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 288(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 296(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 320(%esp), %edi
+ adcl 324(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 328(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 224(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 232(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 256(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 260(%esp), %edi
+ adcl 264(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 216(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 160(%esp), %ecx
+ adcl 164(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 168(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 192(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 96(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 96(%esp), %esi
+ movl 64(%esp), %esi # 4-byte Reload
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 104(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ adcl 108(%esp), %esi
+ adcl 112(%esp), %edi
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 132(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 148(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 152(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1916(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %ebx
+ movl %esi, %eax
+ sbbl 8(%ebp), %eax
+ movl %edi, %ecx
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 40(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 44(%ebp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 48(%ebp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 52(%ebp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ movl 92(%esp), %ebp # 4-byte Reload
+ js .LBB211_2
+# BB#1:
+ movl %edx, %ebp
+.LBB211_2:
+ movl 1904(%esp), %edx
+ movl %ebp, (%edx)
+ movl 88(%esp), %ebp # 4-byte Reload
+ js .LBB211_4
+# BB#3:
+ movl %ebx, %ebp
+.LBB211_4:
+ movl %ebp, 4(%edx)
+ js .LBB211_6
+# BB#5:
+ movl %eax, %esi
+.LBB211_6:
+ movl %esi, 8(%edx)
+ js .LBB211_8
+# BB#7:
+ movl 4(%esp), %edi # 4-byte Reload
+.LBB211_8:
+ movl %edi, 12(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ js .LBB211_10
+# BB#9:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB211_10:
+ movl %eax, 16(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB211_12
+# BB#11:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB211_12:
+ movl %eax, 20(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB211_14
+# BB#13:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB211_14:
+ movl %eax, 24(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB211_16
+# BB#15:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB211_16:
+ movl %eax, 28(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB211_18
+# BB#17:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB211_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB211_20
+# BB#19:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB211_20:
+ movl %eax, 36(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB211_22
+# BB#21:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB211_22:
+ movl %eax, 40(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB211_24
+# BB#23:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB211_24:
+ movl %eax, 44(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB211_26
+# BB#25:
+ movl 64(%esp), %eax # 4-byte Reload
+.LBB211_26:
+ movl %eax, 48(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB211_28
+# BB#27:
+ movl 72(%esp), %eax # 4-byte Reload
+.LBB211_28:
+ movl %eax, 52(%edx)
+ addl $1884, %esp # imm = 0x75C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end211:
+ .size mcl_fp_montNF14Lbmi2, .Lfunc_end211-mcl_fp_montNF14Lbmi2
+
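+# mcl_fp_montRed14Lbmi2: Montgomery reduction of a double-width (28-limb) input against a
+# 14-limb (448-bit) modulus. The Montgomery constant is read from offset -4 of the modulus
+# pointer, and one input limb is folded in per .LmulPv448x32 call before the final
+# conditional subtraction.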
+ .globl mcl_fp_montRed14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed14Lbmi2,@function
+mcl_fp_montRed14Lbmi2: # @mcl_fp_montRed14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1068, %esp # imm = 0x42C
+ calll .L212$pb
+.L212$pb:
+ popl %eax
+.Ltmp43:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp43-.L212$pb), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1096(%esp), %edx
+ movl -4(%edx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1092(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 92(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ imull %eax, %ebx
+ movl 108(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 100(%ecx), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 92(%ecx), %esi
+ movl %esi, 140(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 84(%ecx), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 80(%ecx), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ movl 68(%ecx), %esi
+ movl %esi, 168(%esp) # 4-byte Spill
+ movl 64(%ecx), %esi
+ movl %esi, 164(%esp) # 4-byte Spill
+ movl 60(%ecx), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 52(%ecx), %edi
+ movl %edi, 144(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 40(%ecx), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 36(%ecx), %ebp
+ movl 32(%ecx), %edi
+ movl 28(%ecx), %esi
+ movl 24(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 12(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 8(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 52(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 1008(%esp), %ecx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ movl 92(%esp), %eax # 4-byte Reload
+ addl 1008(%esp), %eax
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1012(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1036(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1040(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ adcl 1044(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 1052(%esp), %ebp
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 944(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 944(%esp), %esi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 948(%esp), %edx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 976(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 984(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %esi # 4-byte Reload
+ adcl 1000(%esp), %esi
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %ebp
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 880(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 884(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 908(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 920(%esp), %edi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl 932(%esp), %esi
+ movl %esi, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ movl 152(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 816(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 820(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 152(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 752(%esp), %ebp
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ movl 156(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 688(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 156(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 140(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 624(%esp), %ebp
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 628(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %ebp # 4-byte Reload
+ adcl 664(%esp), %ebp
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 140(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 560(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 564(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ adcl 596(%esp), %ebp
+ movl %ebp, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %edi # 4-byte Reload
+ adcl 600(%esp), %edi
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1096(%esp), %eax
+ movl %eax, %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 496(%esp), %ebp
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %ebp # 4-byte Reload
+ adcl 516(%esp), %ebp
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ adcl 532(%esp), %edi
+ movl %edi, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 536(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 432(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 440(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ adcl 448(%esp), %ebp
+ movl %ebp, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %ecx # 4-byte Reload
+ adcl 452(%esp), %ecx
+ movl %ecx, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %ebp # 4-byte Reload
+ adcl 456(%esp), %ebp
+ movl 164(%esp), %ecx # 4-byte Reload
+ adcl 460(%esp), %ecx
+ movl %ecx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %ecx # 4-byte Reload
+ adcl 464(%esp), %ecx
+ movl %ecx, 168(%esp) # 4-byte Spill
+ adcl 468(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 484(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %eax, %esi
+ movl 88(%esp), %edi # 4-byte Reload
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 368(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 376(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 380(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %esi # 4-byte Reload
+ adcl 384(%esp), %esi
+ adcl 388(%esp), %ebp
+ movl %ebp, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %ecx # 4-byte Reload
+ adcl 392(%esp), %ecx
+ movl %ecx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %ecx # 4-byte Reload
+ adcl 396(%esp), %ecx
+ movl %ecx, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 400(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 404(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 408(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 412(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 416(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 424(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %eax, %ebp
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 304(%esp), %ebp
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 308(%esp), %edi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 316(%esp), %ebp
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %edi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 240(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 240(%esp), %edi
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 244(%esp), %ecx
+ adcl 248(%esp), %ebp
+ movl %ebp, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl 264(%esp), %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %edi # 4-byte Reload
+ adcl 268(%esp), %edi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 280(%esp), %ebp
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 176(%esp), %esi
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %ebx # 4-byte Reload
+ adcl 188(%esp), %ebx
+ movl %ebx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %edx # 4-byte Reload
+ adcl 196(%esp), %edx
+ movl %edx, 136(%esp) # 4-byte Spill
+ movl %edi, %eax
+ adcl 200(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl 212(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 232(%esp), %ecx
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl 172(%esp), %edi # 4-byte Reload
+ subl 16(%esp), %edi # 4-byte Folded Reload
+ movl 160(%esp), %ebp # 4-byte Reload
+ sbbl 8(%esp), %ebp # 4-byte Folded Reload
+ sbbl 12(%esp), %ebx # 4-byte Folded Reload
+ movl 168(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ sbbl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 148(%esp), %edx # 4-byte Reload
+ sbbl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 156(%esp), %edx # 4-byte Reload
+ sbbl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 152(%esp), %edx # 4-byte Reload
+ sbbl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 124(%esp), %edx # 4-byte Reload
+ sbbl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 140(%esp), %edx # 4-byte Reload
+ sbbl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 128(%esp), %edx # 4-byte Reload
+ sbbl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %edx # 4-byte Reload
+ sbbl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ sbbl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 132(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ sbbl 60(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 144(%esp) # 4-byte Spill
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB212_2
+# BB#1:
+ movl %eax, 168(%esp) # 4-byte Spill
+.LBB212_2:
+ movl %esi, %edx
+ testb %dl, %dl
+ movl 172(%esp), %eax # 4-byte Reload
+ jne .LBB212_4
+# BB#3:
+ movl %edi, %eax
+.LBB212_4:
+ movl 1088(%esp), %edi
+ movl %eax, (%edi)
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ movl 160(%esp), %ecx # 4-byte Reload
+ jne .LBB212_6
+# BB#5:
+ movl %ebp, %ecx
+.LBB212_6:
+ movl %ecx, 4(%edi)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl 164(%esp), %ebp # 4-byte Reload
+ jne .LBB212_8
+# BB#7:
+ movl %ebx, %ebp
+.LBB212_8:
+ movl %ebp, 8(%edi)
+ movl 168(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%edi)
+ movl 124(%esp), %ebp # 4-byte Reload
+ movl 136(%esp), %ebx # 4-byte Reload
+ jne .LBB212_10
+# BB#9:
+ movl 80(%esp), %ebx # 4-byte Reload
+.LBB212_10:
+ movl %ebx, 16(%edi)
+ movl 140(%esp), %ebx # 4-byte Reload
+ movl 148(%esp), %esi # 4-byte Reload
+ jne .LBB212_12
+# BB#11:
+ movl 84(%esp), %esi # 4-byte Reload
+.LBB212_12:
+ movl %esi, 20(%edi)
+ movl 128(%esp), %esi # 4-byte Reload
+ jne .LBB212_14
+# BB#13:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB212_14:
+ movl %eax, 24(%edi)
+ movl 120(%esp), %edx # 4-byte Reload
+ jne .LBB212_16
+# BB#15:
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, 152(%esp) # 4-byte Spill
+.LBB212_16:
+ movl 152(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%edi)
+ jne .LBB212_18
+# BB#17:
+ movl 96(%esp), %ebp # 4-byte Reload
+.LBB212_18:
+ movl %ebp, 32(%edi)
+ jne .LBB212_20
+# BB#19:
+ movl 100(%esp), %ebx # 4-byte Reload
+.LBB212_20:
+ movl %ebx, 36(%edi)
+ jne .LBB212_22
+# BB#21:
+ movl 112(%esp), %esi # 4-byte Reload
+.LBB212_22:
+ movl %esi, 40(%edi)
+ jne .LBB212_24
+# BB#23:
+ movl 116(%esp), %edx # 4-byte Reload
+.LBB212_24:
+ movl %edx, 44(%edi)
+ jne .LBB212_26
+# BB#25:
+ movl 132(%esp), %ecx # 4-byte Reload
+.LBB212_26:
+ movl %ecx, 48(%edi)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB212_28
+# BB#27:
+ movl 144(%esp), %eax # 4-byte Reload
+.LBB212_28:
+ movl %eax, 52(%edi)
+ addl $1068, %esp # imm = 0x42C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end212:
+ .size mcl_fp_montRed14Lbmi2, .Lfunc_end212-mcl_fp_montRed14Lbmi2
+
+ .globl mcl_fp_addPre14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre14Lbmi2,@function
+mcl_fp_addPre14Lbmi2: # @mcl_fp_addPre14Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl 40(%eax), %edi
+ movl %esi, 32(%ebx)
+ movl 40(%ecx), %esi
+ adcl %edi, %esi
+ movl 44(%eax), %edi
+ movl %edx, 36(%ebx)
+ movl 44(%ecx), %edx
+ adcl %edi, %edx
+ movl 48(%eax), %edi
+ movl %esi, 40(%ebx)
+ movl 48(%ecx), %esi
+ adcl %edi, %esi
+ movl %edx, 44(%ebx)
+ movl %esi, 48(%ebx)
+ movl 52(%eax), %eax
+ movl 52(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 52(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end213:
+ .size mcl_fp_addPre14Lbmi2, .Lfunc_end213-mcl_fp_addPre14Lbmi2
+
+ .globl mcl_fp_subPre14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre14Lbmi2,@function
+mcl_fp_subPre14Lbmi2: # @mcl_fp_subPre14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %edi, 32(%ebp)
+ movl 40(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 44(%edx), %ebx
+ movl %esi, 36(%ebp)
+ movl 44(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 48(%edx), %ebx
+ movl %edi, 40(%ebp)
+ movl 48(%ecx), %edi
+ sbbl %ebx, %edi
+ movl %esi, 44(%ebp)
+ movl %edi, 48(%ebp)
+ movl 52(%edx), %edx
+ movl 52(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 52(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end214:
+ .size mcl_fp_subPre14Lbmi2, .Lfunc_end214-mcl_fp_subPre14Lbmi2
+
+ .globl mcl_fp_shr1_14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_14Lbmi2,@function
+mcl_fp_shr1_14Lbmi2: # @mcl_fp_shr1_14Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 48(%ecx)
+ shrl %eax
+ movl %eax, 52(%ecx)
+ popl %esi
+ retl
+.Lfunc_end215:
+ .size mcl_fp_shr1_14Lbmi2, .Lfunc_end215-mcl_fp_shr1_14Lbmi2
+
+ .globl mcl_fp_add14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add14Lbmi2,@function
+mcl_fp_add14Lbmi2: # @mcl_fp_add14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl 72(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 68(%esp), %ebp
+ addl (%ebp), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 4(%ebp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 8(%eax), %ecx
+ adcl 8(%ebp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 12(%ebp), %edx
+ movl 16(%ebp), %ecx
+ adcl 12(%eax), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 16(%eax), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%ebp), %ecx
+ adcl 20(%eax), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 24(%ebp), %ecx
+ adcl 24(%eax), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 28(%ebp), %ecx
+ adcl 28(%eax), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 32(%ebp), %ecx
+ adcl 32(%eax), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 36(%ebp), %ecx
+ adcl 36(%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 40(%ebp), %edx
+ adcl 40(%eax), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 44(%ebp), %ebx
+ adcl 44(%eax), %ebx
+ movl 48(%ebp), %esi
+ adcl 48(%eax), %esi
+ movl 52(%ebp), %edi
+ adcl 52(%eax), %edi
+ movl 64(%esp), %eax
+ movl 4(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl %edx, 40(%eax)
+ movl %ebx, 44(%eax)
+ movl %esi, 48(%eax)
+ movl %edi, 52(%eax)
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 76(%esp), %edx
+ subl (%edx), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ sbbl 4(%edx), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ sbbl 8(%edx), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ sbbl 12(%edx), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ sbbl 16(%edx), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %ebp # 4-byte Reload
+ sbbl 20(%edx), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %ebp # 4-byte Reload
+ sbbl 24(%edx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %ebp # 4-byte Reload
+ sbbl 28(%edx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ sbbl 32(%edx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebp # 4-byte Reload
+ sbbl 36(%edx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl (%esp), %ebp # 4-byte Reload
+ sbbl 40(%edx), %ebp
+ sbbl 44(%edx), %ebx
+ sbbl 48(%edx), %esi
+ sbbl 52(%edx), %edi
+ sbbl $0, %ecx
+ testb $1, %cl
+ jne .LBB216_2
+# BB#1: # %nocarry
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, (%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl %ebp, 40(%eax)
+ movl %ebx, 44(%eax)
+ movl %esi, 48(%eax)
+ movl %edi, 52(%eax)
+.LBB216_2: # %carry
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end216:
+ .size mcl_fp_add14Lbmi2, .Lfunc_end216-mcl_fp_add14Lbmi2
+
+ .globl mcl_fp_addNF14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF14Lbmi2,@function
+mcl_fp_addNF14Lbmi2: # @mcl_fp_addNF14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $112, %esp
+ movl 140(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 136(%esp), %ecx
+ addl (%ecx), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 4(%ecx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 52(%eax), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 48(%eax), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 44(%eax), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 40(%eax), %ebp
+ movl 36(%eax), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 32(%eax), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 28(%eax), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 24(%eax), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 20(%eax), %ebx
+ movl 16(%eax), %edi
+ movl 12(%eax), %esi
+ movl 8(%eax), %edx
+ adcl 8(%ecx), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 12(%ecx), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 16(%ecx), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 20(%ecx), %ebx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 24(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 28(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 32(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 36(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 40(%ecx), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 44(%ecx), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 48(%ecx), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 52(%ecx), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ subl (%ecx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 4(%ecx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 8(%ecx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ movl %edx, %eax
+ sbbl 24(%ecx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 28(%ecx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 32(%ecx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ sbbl 36(%ecx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ sbbl 40(%ecx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ movl %eax, %esi
+ movl %eax, %ebp
+ sbbl 44(%ecx), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, %esi
+ sbbl 48(%ecx), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, %edi
+ sbbl 52(%ecx), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl %edi, %ecx
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ movl 72(%esp), %ecx # 4-byte Reload
+ js .LBB217_2
+# BB#1:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB217_2:
+ movl 132(%esp), %edi
+ movl %ecx, (%edi)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB217_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB217_4:
+ movl %eax, 4(%edi)
+ movl %edx, %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ js .LBB217_6
+# BB#5:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB217_6:
+ movl %edx, 8(%edi)
+ movl %ebp, %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ js .LBB217_8
+# BB#7:
+ movl 12(%esp), %ebp # 4-byte Reload
+.LBB217_8:
+ movl %ebp, 12(%edi)
+ movl 100(%esp), %ebp # 4-byte Reload
+ js .LBB217_10
+# BB#9:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB217_10:
+ movl %eax, 16(%edi)
+ movl 80(%esp), %esi # 4-byte Reload
+ js .LBB217_12
+# BB#11:
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+.LBB217_12:
+ movl 68(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%edi)
+ js .LBB217_14
+# BB#13:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB217_14:
+ movl %ecx, 24(%edi)
+ js .LBB217_16
+# BB#15:
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+.LBB217_16:
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%edi)
+ js .LBB217_18
+# BB#17:
+ movl 32(%esp), %ebp # 4-byte Reload
+.LBB217_18:
+ movl %ebp, 32(%edi)
+ js .LBB217_20
+# BB#19:
+ movl 36(%esp), %ebx # 4-byte Reload
+.LBB217_20:
+ movl %ebx, 36(%edi)
+ js .LBB217_22
+# BB#21:
+ movl 40(%esp), %esi # 4-byte Reload
+.LBB217_22:
+ movl %esi, 40(%edi)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB217_24
+# BB#23:
+ movl 44(%esp), %edx # 4-byte Reload
+.LBB217_24:
+ movl %edx, 44(%edi)
+ movl 92(%esp), %ecx # 4-byte Reload
+ js .LBB217_26
+# BB#25:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB217_26:
+ movl %eax, 48(%edi)
+ js .LBB217_28
+# BB#27:
+ movl 52(%esp), %ecx # 4-byte Reload
+.LBB217_28:
+ movl %ecx, 52(%edi)
+ addl $112, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end217:
+ .size mcl_fp_addNF14Lbmi2, .Lfunc_end217-mcl_fp_addNF14Lbmi2
+
+ .globl mcl_fp_sub14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub14Lbmi2,@function
+mcl_fp_sub14Lbmi2: # @mcl_fp_sub14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl 76(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 80(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ sbbl 32(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 36(%esi), %edx
+ sbbl 36(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 40(%esi), %ecx
+ sbbl 40(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ sbbl 44(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 48(%esi), %ebp
+ sbbl 48(%edi), %ebp
+ movl 52(%esi), %esi
+ sbbl 52(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 72(%esp), %ebx
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%ebx)
+ movl %edx, 36(%ebx)
+ movl %ecx, 40(%ebx)
+ movl %eax, 44(%ebx)
+ movl %ebp, 48(%ebx)
+ movl %esi, 52(%ebx)
+ je .LBB218_2
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 84(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 8(%esi), %edi
+ movl 12(%esi), %eax
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl 44(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%ebx)
+ movl %eax, 44(%ebx)
+ movl 48(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 48(%ebx)
+ movl 52(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ebx)
+.LBB218_2: # %nocarry
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end218:
+ .size mcl_fp_sub14Lbmi2, .Lfunc_end218-mcl_fp_sub14Lbmi2
+
+ .globl mcl_fp_subNF14Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF14Lbmi2,@function
+mcl_fp_subNF14Lbmi2: # @mcl_fp_subNF14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 112(%esp), %ecx
+ movl 52(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ movl 116(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 32(%ecx), %ebp
+ movl 28(%ecx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ sbbl 32(%edi), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 52(%edi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %eax, %esi
+ sarl $31, %esi
+ movl %esi, %ecx
+ addl %ecx, %ecx
+ movl %esi, %ebp
+ adcl %ebp, %ebp
+ shrl $31, %eax
+ orl %ecx, %eax
+ movl 120(%esp), %edi
+ andl 4(%edi), %ebp
+ andl (%edi), %eax
+ movl 52(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 48(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 40(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 36(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 32(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 28(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 24(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 20(%edi), %ebx
+ andl %esi, %ebx
+ movl 16(%edi), %edx
+ andl %esi, %edx
+ movl 12(%edi), %ecx
+ andl %esi, %ecx
+ andl 8(%edi), %esi
+ addl 56(%esp), %eax # 4-byte Folded Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl 108(%esp), %edi
+ movl %eax, (%edi)
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %ebp, 4(%edi)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %esi, 8(%edi)
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 12(%edi)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, 16(%edi)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%edi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 24(%edi)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 28(%edi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%edi)
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%edi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 44(%edi)
+ movl %eax, 48(%edi)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%edi)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end219:
+ .size mcl_fp_subNF14Lbmi2, .Lfunc_end219-mcl_fp_subNF14Lbmi2
+
+ .globl mcl_fpDbl_add14Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add14Lbmi2,@function
+mcl_fpDbl_add14Lbmi2: # @mcl_fpDbl_add14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 128(%esp), %ecx
+ movl 124(%esp), %esi
+ movl 12(%esi), %edi
+ movl 16(%esi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%esi), %ebp
+ movl 120(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%esi), %ebp
+ adcl 8(%esi), %ebx
+ adcl 12(%ecx), %edi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 64(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %edi, 12(%eax)
+ movl 20(%esi), %edi
+ adcl %ebx, %edi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%esi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %edi, 20(%eax)
+ movl 28(%esi), %edi
+ adcl %ebx, %edi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%esi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %edi, 28(%eax)
+ movl 36(%esi), %edi
+ adcl %ebx, %edi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%esi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %edi, 36(%eax)
+ movl 44(%esi), %edi
+ adcl %ebx, %edi
+ movl 48(%ecx), %ebx
+ movl %edx, 40(%eax)
+ movl 48(%esi), %edx
+ adcl %ebx, %edx
+ movl 52(%ecx), %ebx
+ movl %edi, 44(%eax)
+ movl 52(%esi), %edi
+ adcl %ebx, %edi
+ movl 56(%ecx), %ebx
+ movl %edx, 48(%eax)
+ movl 56(%esi), %edx
+ adcl %ebx, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 60(%ecx), %edx
+ movl %edi, 52(%eax)
+ movl 60(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 64(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%ecx), %edx
+ movl 68(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%ecx), %edx
+ movl 72(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 76(%ecx), %edx
+ movl 76(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%ecx), %edx
+ movl 80(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%ecx), %edx
+ movl 84(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 88(%ecx), %edx
+ movl 88(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 92(%ecx), %edx
+ movl 92(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 96(%ecx), %edx
+ movl 96(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 100(%ecx), %edx
+ movl 100(%esi), %edi
+ adcl %edx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 104(%ecx), %edx
+ movl 104(%esi), %ebx
+ adcl %edx, %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 108(%ecx), %ecx
+ movl 108(%esi), %esi
+ adcl %ecx, %esi
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 132(%esp), %ebp
+ movl 72(%esp), %ecx # 4-byte Reload
+ subl (%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ sbbl 40(%ebp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 44(%ebp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl %esi, %ebx
+ sbbl 48(%ebp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl 52(%ebp), %esi
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB220_2
+# BB#1:
+ movl %esi, %ebx
+.LBB220_2:
+ testb %dl, %dl
+ movl 72(%esp), %eax # 4-byte Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB220_4
+# BB#3:
+ movl %ecx, %edx
+ movl (%esp), %edi # 4-byte Reload
+ movl 4(%esp), %ebp # 4-byte Reload
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB220_4:
+ movl 120(%esp), %esi
+ movl %eax, 56(%esi)
+ movl 76(%esp), %eax # 4-byte Reload
+ movl %eax, 60(%esi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, 64(%esi)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl %eax, 68(%esi)
+ movl 88(%esp), %eax # 4-byte Reload
+ movl %eax, 72(%esi)
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esi)
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esi)
+ movl %ebp, 84(%esi)
+ movl %edi, 88(%esi)
+ movl %edx, 92(%esi)
+ movl 52(%esp), %edx # 4-byte Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ jne .LBB220_6
+# BB#5:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB220_6:
+ movl %eax, 96(%esi)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB220_8
+# BB#7:
+ movl 40(%esp), %edx # 4-byte Reload
+.LBB220_8:
+ movl %edx, 100(%esi)
+ jne .LBB220_10
+# BB#9:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB220_10:
+ movl %ecx, 104(%esi)
+ movl %ebx, 108(%esi)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end220:
+ .size mcl_fpDbl_add14Lbmi2, .Lfunc_end220-mcl_fpDbl_add14Lbmi2
+
+ .globl mcl_fpDbl_sub14Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub14Lbmi2,@function
+mcl_fpDbl_sub14Lbmi2: # @mcl_fpDbl_sub14Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $96, %esp
+ movl 120(%esp), %ebx
+ movl (%ebx), %eax
+ movl 4(%ebx), %edx
+ movl 124(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %edx
+ movl 8(%ebx), %esi
+ sbbl 8(%ebp), %esi
+ movl 116(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%ebx), %eax
+ sbbl 12(%ebp), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%ebx), %edx
+ sbbl 16(%ebp), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebp), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%ebx), %eax
+ sbbl %esi, %eax
+ movl 24(%ebp), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%ebx), %edx
+ sbbl %esi, %edx
+ movl 28(%ebp), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%ebx), %eax
+ sbbl %esi, %eax
+ movl 32(%ebp), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%ebx), %edx
+ sbbl %esi, %edx
+ movl 36(%ebp), %esi
+ movl %eax, 28(%ecx)
+ movl 36(%ebx), %eax
+ sbbl %esi, %eax
+ movl 40(%ebp), %esi
+ movl %edx, 32(%ecx)
+ movl 40(%ebx), %edx
+ sbbl %esi, %edx
+ movl 44(%ebp), %esi
+ movl %eax, 36(%ecx)
+ movl 44(%ebx), %eax
+ sbbl %esi, %eax
+ movl 48(%ebp), %esi
+ movl %edx, 40(%ecx)
+ movl 48(%ebx), %edx
+ sbbl %esi, %edx
+ movl 52(%ebp), %esi
+ movl %eax, 44(%ecx)
+ movl 52(%ebx), %eax
+ sbbl %esi, %eax
+ movl 56(%ebp), %esi
+ movl %edx, 48(%ecx)
+ movl 56(%ebx), %edx
+ sbbl %esi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 60(%ebp), %edx
+ movl %eax, 52(%ecx)
+ movl 60(%ebx), %eax
+ sbbl %edx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 64(%ebp), %eax
+ movl 64(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 68(%ebp), %eax
+ movl 68(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 72(%ebp), %eax
+ movl 72(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 76(%ebp), %eax
+ movl 76(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 80(%ebp), %eax
+ movl 80(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 84(%ebp), %eax
+ movl 84(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 88(%ebp), %eax
+ movl 88(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 92(%ebp), %eax
+ movl 92(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 96(%ebp), %eax
+ movl 96(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 100(%ebp), %eax
+ movl 100(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 104(%ebp), %eax
+ movl 104(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 108(%ebp), %eax
+ movl 108(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 128(%esp), %ebp
+ jne .LBB221_1
+# BB#2:
+ movl $0, 56(%esp) # 4-byte Folded Spill
+ jmp .LBB221_3
+.LBB221_1:
+ movl 52(%ebp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+.LBB221_3:
+ testb %al, %al
+ jne .LBB221_4
+# BB#5:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB221_6
+.LBB221_4:
+ movl (%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 4(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB221_6:
+ jne .LBB221_7
+# BB#8:
+ movl $0, 32(%esp) # 4-byte Folded Spill
+ jmp .LBB221_9
+.LBB221_7:
+ movl 48(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+.LBB221_9:
+ jne .LBB221_10
+# BB#11:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ jmp .LBB221_12
+.LBB221_10:
+ movl 44(%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+.LBB221_12:
+ jne .LBB221_13
+# BB#14:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB221_15
+.LBB221_13:
+ movl 40(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB221_15:
+ jne .LBB221_16
+# BB#17:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB221_18
+.LBB221_16:
+ movl 36(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB221_18:
+ jne .LBB221_19
+# BB#20:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB221_21
+.LBB221_19:
+ movl 32(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB221_21:
+ jne .LBB221_22
+# BB#23:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB221_24
+.LBB221_22:
+ movl 28(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB221_24:
+ jne .LBB221_25
+# BB#26:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB221_27
+.LBB221_25:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB221_27:
+ jne .LBB221_28
+# BB#29:
+ movl $0, %esi
+ jmp .LBB221_30
+.LBB221_28:
+ movl 20(%ebp), %esi
+.LBB221_30:
+ jne .LBB221_31
+# BB#32:
+ movl $0, %edi
+ jmp .LBB221_33
+.LBB221_31:
+ movl 16(%ebp), %edi
+.LBB221_33:
+ jne .LBB221_34
+# BB#35:
+ movl $0, %ebx
+ jmp .LBB221_36
+.LBB221_34:
+ movl 12(%ebp), %ebx
+.LBB221_36:
+ jne .LBB221_37
+# BB#38:
+ xorl %ebp, %ebp
+ jmp .LBB221_39
+.LBB221_37:
+ movl 8(%ebp), %ebp
+.LBB221_39:
+ movl 20(%esp), %edx # 4-byte Reload
+ addl 44(%esp), %edx # 4-byte Folded Reload
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 56(%ecx)
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 60(%ecx)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 64(%ecx)
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 68(%ecx)
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 72(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 76(%ecx)
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 80(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 84(%ecx)
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 88(%ecx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 96(%ecx)
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 100(%ecx)
+ movl %eax, 104(%ecx)
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%ecx)
+ addl $96, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end221:
+ .size mcl_fpDbl_sub14Lbmi2, .Lfunc_end221-mcl_fpDbl_sub14Lbmi2
+
+ .align 16, 0x90
+ .type .LmulPv480x32,@function
+.LmulPv480x32: # @mulPv480x32
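+# As the name suggests, this local helper appears to multiply the 15-limb
+# (480-bit) little-endian operand pointed to by %edx by the 32-bit scalar
+# passed at 72(%esp), writing the 16-limb product to the buffer at %ecx;
+# each partial product is formed with the BMI2 mulx instruction and the
+# carries are folded in with adcl.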
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl %edx, %eax
+ movl 72(%esp), %edi
+ movl %edi, %edx
+ mulxl 4(%eax), %ebx, %esi
+ movl %edi, %edx
+ mulxl (%eax), %ebp, %edx
+ movl %ebp, 48(%esp) # 4-byte Spill
+ addl %ebx, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 8(%eax), %edx, %ebx
+ adcl %esi, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 12(%eax), %edx, %esi
+ adcl %ebx, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 16(%eax), %edx, %ebx
+ adcl %esi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 20(%eax), %edx, %esi
+ adcl %ebx, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 24(%eax), %edx, %ebx
+ adcl %esi, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 28(%eax), %edx, %esi
+ adcl %ebx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 32(%eax), %edx, %ebx
+ adcl %esi, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 36(%eax), %edx, %esi
+ adcl %ebx, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 40(%eax), %edx, %ebp
+ adcl %esi, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 44(%eax), %ebx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl %ebp, %ebx
+ movl %edi, %edx
+ mulxl 48(%eax), %esi, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %edi, %edx
+ mulxl 52(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 44(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl 36(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%ecx)
+ movl 32(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%ecx)
+ movl 28(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%ecx)
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%ecx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%ecx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%ecx)
+ movl %ebx, 44(%ecx)
+ movl %esi, 48(%ecx)
+ movl %edx, 52(%ecx)
+ movl %edi, %edx
+ mulxl 56(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%ecx)
+ adcl $0, %edx
+ movl %edx, 60(%ecx)
+ movl %ecx, %eax
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end222:
+ .size .LmulPv480x32, .Lfunc_end222-.LmulPv480x32
+
+ .globl mcl_fp_mulUnitPre15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre15Lbmi2,@function
+mcl_fp_mulUnitPre15Lbmi2: # @mcl_fp_mulUnitPre15Lbmi2
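+# Appears to compute z[0..15] = x[0..14] * y for a single 32-bit y: the stack
+# arguments are forwarded to .LmulPv480x32 and its 16-limb product is copied
+# to the destination buffer.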
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $124, %esp
+ calll .L223$pb
+.L223$pb:
+ popl %ebx
+.Ltmp44:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp44-.L223$pb), %ebx
+ movl 152(%esp), %eax
+ movl %eax, (%esp)
+ leal 56(%esp), %ecx
+ movl 148(%esp), %edx
+ calll .LmulPv480x32
+ movl 116(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 108(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 104(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp
+ movl 72(%esp), %ebx
+ movl 68(%esp), %edi
+ movl 64(%esp), %esi
+ movl 56(%esp), %edx
+ movl 60(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ addl $124, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end223:
+ .size mcl_fp_mulUnitPre15Lbmi2, .Lfunc_end223-mcl_fp_mulUnitPre15Lbmi2
+
+ .globl mcl_fpDbl_mulPre15Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre15Lbmi2,@function
+mcl_fpDbl_mulPre15Lbmi2: # @mcl_fpDbl_mulPre15Lbmi2
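+# Appears to be a schoolbook (operand-scanning) full product without modular
+# reduction: for each 32-bit limb of the second operand, .LmulPv480x32 forms
+# a 16-limb row product of the 15-limb first operand, which is then added
+# with carries into the 30-limb double-width result.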
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1084, %esp # imm = 0x43C
+ calll .L224$pb
+.L224$pb:
+ popl %esi
+.Ltmp45:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp45-.L224$pb), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 1112(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl 1108(%esp), %edi
+ movl %edi, %edx
+ movl %esi, %ebx
+ calll .LmulPv480x32
+ movl 1076(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1072(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1068(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1060(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1056(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1052(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1044(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1040(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1036(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1032(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1028(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 1024(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1016(%esp), %eax
+ movl 1020(%esp), %ebp
+ movl 1104(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 1112(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl %edi, %edx
+ movl %esi, %ebx
+ calll .LmulPv480x32
+ addl 952(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 1012(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1008(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1004(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1000(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 996(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 992(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 988(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 984(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 980(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 972(%esp), %edi
+ movl 968(%esp), %esi
+ movl 964(%esp), %edx
+ movl 956(%esp), %eax
+ movl 960(%esp), %ecx
+ movl 1104(%esp), %ebp
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 72(%esp), %eax # 4-byte Reload
+ addl 888(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 948(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 944(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 940(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 936(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 932(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 928(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 924(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 916(%esp), %ebx
+ movl 912(%esp), %edi
+ movl 908(%esp), %esi
+ movl 904(%esp), %edx
+ movl 900(%esp), %ecx
+ movl 892(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 896(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 72(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 112(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 824(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 884(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 880(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 876(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 872(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 868(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 864(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 856(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 852(%esp), %ebx
+ movl 848(%esp), %edi
+ movl 844(%esp), %esi
+ movl 840(%esp), %edx
+ movl 836(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 760(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 788(%esp), %ebx
+ movl 784(%esp), %edi
+ movl 780(%esp), %esi
+ movl 776(%esp), %edx
+ movl 772(%esp), %ecx
+ movl 764(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 748(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 744(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 740(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 732(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 724(%esp), %ebx
+ movl 720(%esp), %edi
+ movl 716(%esp), %esi
+ movl 712(%esp), %edx
+ movl 708(%esp), %ecx
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 632(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 688(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 680(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 676(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 672(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 660(%esp), %ebx
+ movl 656(%esp), %edi
+ movl 652(%esp), %esi
+ movl 648(%esp), %edx
+ movl 644(%esp), %ecx
+ movl 636(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 568(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 628(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 624(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 620(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 616(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 596(%esp), %ebx
+ movl 592(%esp), %edi
+ movl 588(%esp), %esi
+ movl 584(%esp), %edx
+ movl 580(%esp), %ecx
+ movl 572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 504(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 532(%esp), %ebx
+ movl 528(%esp), %edi
+ movl 524(%esp), %esi
+ movl 520(%esp), %edx
+ movl 516(%esp), %ecx
+ movl 508(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 440(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 468(%esp), %ebx
+ movl 464(%esp), %edi
+ movl 460(%esp), %esi
+ movl 456(%esp), %edx
+ movl 452(%esp), %ecx
+ movl 444(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 376(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 404(%esp), %ebx
+ movl 400(%esp), %edi
+ movl 396(%esp), %esi
+ movl 392(%esp), %edx
+ movl 388(%esp), %ecx
+ movl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 312(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 348(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 340(%esp), %ebx
+ movl 336(%esp), %edi
+ movl 332(%esp), %esi
+ movl 328(%esp), %edx
+ movl 324(%esp), %ecx
+ movl 316(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%eax)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl %eax, 108(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 1108(%esp), %eax
+ movl %eax, %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 108(%esp), %eax # 4-byte Reload
+ addl 248(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 288(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 284(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 276(%esp), %ebx
+ movl 272(%esp), %edi
+ movl 268(%esp), %edx
+ movl 264(%esp), %ecx
+ movl 260(%esp), %eax
+ movl 252(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 256(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ movl 1104(%esp), %ebp
+ movl %esi, 48(%ebp)
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 20(%esp), %esi # 4-byte Reload
+ adcl %esi, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 244(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 240(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 236(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 232(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 220(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 212(%esp), %ebx
+ movl 208(%esp), %edx
+ movl 204(%esp), %ecx
+ movl 200(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 188(%esp), %eax
+ movl 192(%esp), %esi
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl 1104(%esp), %edi
+ movl %ebp, 52(%edi)
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl %edi, 40(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 64(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 120(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 124(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 128(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 164(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp
+ movl 144(%esp), %edi
+ movl 140(%esp), %esi
+ movl 136(%esp), %edx
+ movl 132(%esp), %ecx
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebx # 4-byte Reload
+ movl %ebx, 56(%eax)
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl %ebx, 60(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %ebx # 4-byte Reload
+ movl %ebx, 64(%eax)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 76(%eax)
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 80(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, 84(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 92(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 96(%eax)
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 100(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 104(%eax)
+ movl %ecx, 108(%eax)
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 112(%eax)
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 116(%eax)
+ addl $1084, %esp # imm = 0x43C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end224:
+ .size mcl_fpDbl_mulPre15Lbmi2, .Lfunc_end224-mcl_fpDbl_mulPre15Lbmi2
+
+ .globl mcl_fpDbl_sqrPre15Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre15Lbmi2,@function
+mcl_fpDbl_sqrPre15Lbmi2: # @mcl_fpDbl_sqrPre15Lbmi2
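+# Same row-by-row structure as mcl_fpDbl_mulPre15Lbmi2, but both operand
+# pointers refer to the single input, so this appears to compute the 30-limb
+# square of a 15-limb value without modular reduction.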
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1084, %esp # imm = 0x43C
+ calll .L225$pb
+.L225$pb:
+ popl %ebx
+.Ltmp46:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp46-.L225$pb), %ebx
+ movl %ebx, 116(%esp) # 4-byte Spill
+ movl 1108(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl %edx, %edi
+ movl %ebx, %esi
+ calll .LmulPv480x32
+ movl 1076(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1072(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1068(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1060(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1056(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1052(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1044(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1040(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1036(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1032(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1028(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 1024(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1016(%esp), %eax
+ movl 1020(%esp), %ebp
+ movl 1104(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl %esi, %ebx
+ calll .LmulPv480x32
+ addl 952(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 1012(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1008(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1004(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1000(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 996(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 992(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 988(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 984(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 980(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 972(%esp), %edi
+ movl 968(%esp), %esi
+ movl 964(%esp), %edx
+ movl 956(%esp), %eax
+ movl 960(%esp), %ecx
+ movl 1104(%esp), %ebp
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 72(%esp), %eax # 4-byte Reload
+ addl 888(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 948(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 944(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 940(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 936(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 932(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 928(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 924(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 916(%esp), %ebx
+ movl 912(%esp), %edi
+ movl 908(%esp), %esi
+ movl 904(%esp), %edx
+ movl 900(%esp), %ecx
+ movl 892(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 896(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 72(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 112(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 824(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 884(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 880(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 876(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 872(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 868(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 864(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 856(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 852(%esp), %ebx
+ movl 848(%esp), %edi
+ movl 844(%esp), %esi
+ movl 840(%esp), %edx
+ movl 836(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 760(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 788(%esp), %ebx
+ movl 784(%esp), %edi
+ movl 780(%esp), %esi
+ movl 776(%esp), %edx
+ movl 772(%esp), %ecx
+ movl 764(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 748(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 744(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 740(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 732(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 724(%esp), %ebx
+ movl 720(%esp), %edi
+ movl 716(%esp), %esi
+ movl 712(%esp), %edx
+ movl 708(%esp), %ecx
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 632(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 688(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 680(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 676(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 672(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 660(%esp), %ebx
+ movl 656(%esp), %edi
+ movl 652(%esp), %esi
+ movl 648(%esp), %edx
+ movl 644(%esp), %ecx
+ movl 636(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 568(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 628(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 624(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 620(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 616(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 596(%esp), %ebx
+ movl 592(%esp), %edi
+ movl 588(%esp), %esi
+ movl 584(%esp), %edx
+ movl 580(%esp), %ecx
+ movl 572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 504(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 532(%esp), %ebx
+ movl 528(%esp), %edi
+ movl 524(%esp), %esi
+ movl 520(%esp), %edx
+ movl 516(%esp), %ecx
+ movl 508(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 440(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 468(%esp), %ebx
+ movl 464(%esp), %edi
+ movl 460(%esp), %esi
+ movl 456(%esp), %edx
+ movl 452(%esp), %ecx
+ movl 444(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 376(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 404(%esp), %ebx
+ movl 400(%esp), %edi
+ movl 396(%esp), %esi
+ movl 392(%esp), %edx
+ movl 388(%esp), %ecx
+ movl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 44(%edx), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 312(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 348(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 340(%esp), %ebx
+ movl 336(%esp), %edi
+ movl 332(%esp), %esi
+ movl 328(%esp), %edx
+ movl 324(%esp), %ecx
+ movl 316(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%eax)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl %eax, 108(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 48(%edx), %eax
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 108(%esp), %eax # 4-byte Reload
+ addl 248(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 288(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 284(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 276(%esp), %ebx
+ movl 272(%esp), %edi
+ movl 268(%esp), %edx
+ movl 264(%esp), %ecx
+ movl 260(%esp), %eax
+ movl 252(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 256(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ movl 1104(%esp), %ebp
+ movl %esi, 48(%ebp)
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 20(%esp), %esi # 4-byte Reload
+ adcl %esi, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 52(%edx), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 244(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 240(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 236(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 232(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 220(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 212(%esp), %ebx
+ movl 208(%esp), %edx
+ movl 204(%esp), %ecx
+ movl 200(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 188(%esp), %eax
+ movl 192(%esp), %esi
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl 1104(%esp), %edi
+ movl %ebp, 52(%edi)
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl %edi, 40(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 64(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 56(%edx), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 120(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 124(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 128(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 164(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp
+ movl 144(%esp), %edi
+ movl 140(%esp), %esi
+ movl 136(%esp), %edx
+ movl 132(%esp), %ecx
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebx # 4-byte Reload
+ movl %ebx, 56(%eax)
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl %ebx, 60(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %ebx # 4-byte Reload
+ movl %ebx, 64(%eax)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 76(%eax)
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 80(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, 84(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 92(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 96(%eax)
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 100(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 104(%eax)
+ movl %ecx, 108(%eax)
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 112(%eax)
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 116(%eax)
+ addl $1084, %esp # imm = 0x43C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end225:
+ .size mcl_fpDbl_sqrPre15Lbmi2, .Lfunc_end225-mcl_fpDbl_sqrPre15Lbmi2
+
+ .globl mcl_fp_mont15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont15Lbmi2,@function
+mcl_fp_mont15Lbmi2: # @mcl_fp_mont15Lbmi2
+# BB#0:
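+# Assumed from the mcl naming convention and the repeated .LmulPv480x32 calls:
+# this routine performs word-serial (CIOS-style) Montgomery multiplication over
+# 15 x 32-bit limbs (480 bits). For each word y[i] it accumulates x*y[i], forms a
+# quotient digit from the low limb and n' (read from p[-1] below), and folds in
+# q*p so the low word cancels; the BMI2 variant routes the multiplies through
+# the mulx-based .LmulPv480x32 helper.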
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2044, %esp # imm = 0x7FC
+ calll .L226$pb
+.L226$pb:
+ popl %ebx
+.Ltmp47:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp47-.L226$pb), %ebx
+ movl 2076(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1976(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 1976(%esp), %ebp
+ movl 1980(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
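+# %eax = t[0] * %esi, where %esi was loaded from -4(p) above; assuming -4(p)
+# holds n' = -p^-1 mod 2^32 (mcl's layout), this is the Montgomery quotient
+# digit m, and m*p is generated next so that t[0] + m*p[0] == 0 mod 2^32.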
+ movl 2036(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 2032(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2028(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 2024(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2020(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2016(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2012(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2008(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 2004(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 2000(%esp), %edi
+ movl 1996(%esp), %esi
+ movl 1992(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 1988(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1984(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1912(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ addl 1912(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1916(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1920(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1924(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1928(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1932(%esp), %esi
+ adcl 1936(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1944(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1948(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1956(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1960(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1964(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1968(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1972(%esp), %ebp
+ sbbl %eax, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1848(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 116(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ movl 88(%esp), %edx # 4-byte Reload
+ addl 1848(%esp), %edx
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1852(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1856(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1860(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1864(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1868(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1872(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1876(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1880(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1884(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1888(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1892(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1896(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1900(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1904(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ adcl 1908(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
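+# sbbl of a register with itself captures the carry out of the additions above
+# as 0 or -1 in %ebp; the "andl $1" after the next call reduces it to a 0/1 word.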
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1784(%esp), %ecx
+ movl 2076(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1784(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1804(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1812(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1820(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1836(%esp), %esi
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1840(%esp), %ebp
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1844(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1720(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 1720(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1724(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1728(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1732(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1736(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1740(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1744(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1756(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 1768(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl 1772(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1776(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1780(%esp), %esi
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1656(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ addl 1656(%esp), %eax
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1664(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1680(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 1688(%esp), %ebp
+ adcl 1692(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1696(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1700(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1704(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1708(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 1712(%esp), %edi
+ adcl 1716(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1592(%esp), %ecx
+ movl 2068(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1592(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1596(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1600(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1604(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1608(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1616(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1620(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1624(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1628(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1632(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1636(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1640(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1644(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1528(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 1528(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1544(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1548(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1564(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl 1568(%esp), %edi
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1572(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1576(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1580(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1584(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1588(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1464(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 92(%esp), %ecx # 4-byte Reload
+ addl 1464(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1476(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1484(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1488(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1496(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1500(%esp), %edi
+ movl %edi, 112(%esp) # 4-byte Spill
+ adcl 1504(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 1512(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1516(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1400(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 92(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1400(%esp), %edi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1404(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1408(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1412(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1416(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 1420(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1424(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1428(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 1432(%esp), %edi
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1436(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1440(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 1448(%esp), %esi
+ movl %esi, %ebp
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1452(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1460(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1336(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1336(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1364(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1380(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ adcl 1384(%esp), %esi
+ movl %esi, %ebp
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1392(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1272(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 80(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1272(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1276(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1280(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1284(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1288(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1296(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1300(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1304(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1308(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1312(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1316(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 1320(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1328(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 2072(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 1208(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1232(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1244(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1264(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1268(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ addl 1144(%esp), %eax
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1156(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1168(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1180(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1196(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1080(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 1080(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1088(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1092(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1128(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 1016(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1028(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1032(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1044(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 1060(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 952(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 964(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 976(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 992(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl %ebp, %eax
+ andl $1, %eax
+ addl 888(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 892(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 896(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 900(%esp), %edi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 904(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 908(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 912(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 916(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 920(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 924(%esp), %ebp
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 928(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 932(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 936(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 940(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 944(%esp), %esi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 824(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 832(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 856(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 864(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 872(%esp), %edi
+ adcl 876(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 760(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 776(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 800(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 804(%esp), %ebp
+ adcl 808(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 816(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 696(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 708(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 736(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 748(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 752(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 632(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 656(%esp), %edi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 672(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 688(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 568(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 588(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 596(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 604(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 504(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 516(%esp), %edi
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 520(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 532(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 560(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 440(%esp), %ecx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 448(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ adcl 452(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 460(%esp), %edi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 492(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 376(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 388(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 396(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 404(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 416(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 312(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 320(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 336(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 348(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 352(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 248(%esp), %edi
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 252(%esp), %esi
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 256(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 288(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl %esi, %ecx
+ movl 96(%esp), %esi # 4-byte Reload
+ addl 184(%esp), %ecx
+ adcl 188(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 196(%esp), %ebp
+ adcl 200(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 104(%esp), %ebx # 4-byte Reload
+ andl $1, %ebx
+ addl 120(%esp), %edi
+ movl %ebp, %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 128(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 132(%esp), %edi
+ adcl 136(%esp), %esi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 148(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 152(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 156(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 160(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 164(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 168(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 172(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 176(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %edx # 4-byte Reload
+ adcl 180(%esp), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 104(%esp) # 4-byte Spill
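+# NOTE: the sequence below appears to be the final conditional subtraction of
+# the Montgomery reduction: the accumulated 15-limb value is trial-subtracted
+# word by word against the modulus pointed to by 2076(%esp), the borrow is
+# folded into %ebx, and the branch ladder that follows keeps the un-subtracted
+# word wherever the trial subtraction borrowed.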
+ movl %eax, %edx
+ movl 2076(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %ecx
+ movl %edi, %eax
+ sbbl 8(%ebp), %eax
+ movl %esi, %ebx
+ sbbl 12(%ebp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx # 4-byte Reload
+ sbbl 16(%ebp), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 92(%esp), %ebx # 4-byte Reload
+ sbbl 20(%ebp), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ sbbl 24(%ebp), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ebp), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ sbbl 32(%ebp), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %ebx # 4-byte Reload
+ sbbl 36(%ebp), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ sbbl 40(%ebp), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ebx # 4-byte Reload
+ sbbl 44(%ebp), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 76(%esp), %ebx # 4-byte Reload
+ sbbl 48(%ebp), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx # 4-byte Reload
+ sbbl 52(%ebp), %ebx
+ movl %ebx, 88(%esp) # 4-byte Spill
+ movl 112(%esp), %ebx # 4-byte Reload
+ sbbl 56(%ebp), %ebx
+ movl %ebx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ebx # 4-byte Reload
+ movl 108(%esp), %ebp # 4-byte Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB226_2
+# BB#1:
+ movl %edx, %ebp
+.LBB226_2:
+ movl 2064(%esp), %edx
+ movl %ebp, (%edx)
+ testb %bl, %bl
+ movl 116(%esp), %ebp # 4-byte Reload
+ jne .LBB226_4
+# BB#3:
+ movl %ecx, %ebp
+.LBB226_4:
+ movl %ebp, 4(%edx)
+ jne .LBB226_6
+# BB#5:
+ movl %eax, %edi
+.LBB226_6:
+ movl %edi, 8(%edx)
+ jne .LBB226_8
+# BB#7:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB226_8:
+ movl %esi, 12(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ jne .LBB226_10
+# BB#9:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB226_10:
+ movl %eax, 16(%edx)
+ movl 92(%esp), %eax # 4-byte Reload
+ jne .LBB226_12
+# BB#11:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB226_12:
+ movl %eax, 20(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ jne .LBB226_14
+# BB#13:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB226_14:
+ movl %eax, 24(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ jne .LBB226_16
+# BB#15:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB226_16:
+ movl %eax, 28(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ jne .LBB226_18
+# BB#17:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB226_18:
+ movl %eax, 32(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB226_20
+# BB#19:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB226_20:
+ movl %eax, 36(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ jne .LBB226_22
+# BB#21:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB226_22:
+ movl %eax, 40(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ jne .LBB226_24
+# BB#23:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB226_24:
+ movl %eax, 44(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ jne .LBB226_26
+# BB#25:
+ movl 52(%esp), %eax # 4-byte Reload
+.LBB226_26:
+ movl %eax, 48(%edx)
+ movl 100(%esp), %eax # 4-byte Reload
+ jne .LBB226_28
+# BB#27:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB226_28:
+ movl %eax, 52(%edx)
+ movl 112(%esp), %eax # 4-byte Reload
+ jne .LBB226_30
+# BB#29:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB226_30:
+ movl %eax, 56(%edx)
+ addl $2044, %esp # imm = 0x7FC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end226:
+ .size mcl_fp_mont15Lbmi2, .Lfunc_end226-mcl_fp_mont15Lbmi2
+
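+# NOTE: mcl_fp_montNF15Lbmi2 below appears to be the "NF" variant of 15-limb
+# (480-bit) Montgomery multiplication for i386 with BMI2. Each round loads one
+# 32-bit word of the multiplier, calls .LmulPv480x32 on the other operand,
+# then scales the running sum by the Montgomery constant (presumably
+# -p^-1 mod 2^32, loaded from -4(modulus)) and calls .LmulPv480x32 on the
+# modulus to reduce.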
+ .globl mcl_fp_montNF15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF15Lbmi2,@function
+mcl_fp_montNF15Lbmi2: # @mcl_fp_montNF15Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2028, %esp # imm = 0x7EC
+ calll .L227$pb
+.L227$pb:
+ popl %ebx
+.Ltmp48:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp48-.L227$pb), %ebx
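+# (The calll/popl pair above is the usual i386 PIC idiom: the call pushes the
+# address of .L227$pb, popl %ebx recovers it, and adding the GOT displacement
+# leaves the GOT base in %ebx for the position-independent calls below.)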
+ movl 2060(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1960(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1960(%esp), %ebp
+ movl 1964(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 2020(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2016(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2012(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2008(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 2004(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2000(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1996(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1992(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1988(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1984(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1980(%esp), %esi
+ movl 1976(%esp), %edi
+ movl 1972(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1968(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1896(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1896(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1900(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1904(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1908(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1912(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 1916(%esp), %esi
+ movl %esi, %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1920(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1924(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1928(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1932(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1936(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1944(%esp), %ebp
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1948(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1956(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1832(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1892(%esp), %eax
+ movl 92(%esp), %edx # 4-byte Reload
+ addl 1832(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1836(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1840(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1844(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1848(%esp), %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1852(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1856(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1860(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1864(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1868(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1872(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1876(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 1880(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 1884(%esp), %ebp
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1888(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1768(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1768(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1772(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1776(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1784(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1804(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, %esi
+ adcl 1820(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1824(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1704(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1764(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1704(%esp), %edx
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1708(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1712(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1716(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1720(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1724(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1728(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1732(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 1736(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1740(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ adcl 1748(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1752(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1756(%esp), %ebp
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1760(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1640(%esp), %ecx
+ movl 2060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ addl 1640(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1644(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1656(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1664(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1680(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, %esi
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1688(%esp), %edi
+ adcl 1692(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1696(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1700(%esp), %ebp
+ movl 2056(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1576(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1636(%esp), %eax
+ movl 88(%esp), %edx # 4-byte Reload
+ addl 1576(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1580(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1584(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1588(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1592(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1596(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1600(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1604(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1608(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1612(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 1616(%esp), %esi
+ adcl 1620(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1624(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1628(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1632(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1512(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1512(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1516(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 1532(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1544(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1548(%esp), %ebp
+ adcl 1552(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1564(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1568(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1572(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1448(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1508(%esp), %eax
+ movl 72(%esp), %edx # 4-byte Reload
+ addl 1448(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1452(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1460(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 1464(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1468(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1472(%esp), %edi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1476(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1480(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 1484(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1488(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1492(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1496(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1500(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 1504(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1384(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1384(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1408(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1432(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1440(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1444(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1320(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1380(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 1320(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 1324(%esp), %ebp
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1328(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1352(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1368(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1256(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 1256(%esp), %eax
+ adcl 1260(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ adcl 1264(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1272(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 1296(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1304(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1312(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1192(%esp), %ecx
+ movl 2052(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ movl 1252(%esp), %eax
+ movl 48(%esp), %edx # 4-byte Reload
+ addl 1192(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1196(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1200(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 1204(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1208(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1212(%esp), %ebp
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1216(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1224(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1228(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 1244(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1248(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1128(%esp), %ecx
+ movl 2060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ addl 1128(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1140(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1148(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1168(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1184(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 1188(%esp), %esi
+ movl 2056(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1064(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1124(%esp), %eax
+ movl 44(%esp), %edx # 4-byte Reload
+ addl 1064(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 1072(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1076(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1084(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1088(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1092(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1096(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 1100(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1104(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1108(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1112(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1116(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1120(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %esi
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1000(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1000(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1012(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1020(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1028(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1060(%esp), %esi
+ movl 2056(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 996(%esp), %eax
+ movl 52(%esp), %edx # 4-byte Reload
+ addl 936(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 940(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 944(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 952(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 956(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 960(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 964(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 968(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 972(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 976(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 980(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 984(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 988(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 992(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %esi
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 872(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 872(%esp), %edi
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 876(%esp), %ebp
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 880(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 932(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 808(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 868(%esp), %eax
+ movl %ebp, %ecx
+ addl 808(%esp), %ecx
+ adcl 812(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 816(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 820(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 824(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 828(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 832(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 836(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 840(%esp), %edi
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 844(%esp), %esi
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 848(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 852(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 856(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 860(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 864(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 744(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 744(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 776(%esp), %edi
+ adcl 780(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 792(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 680(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 740(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 680(%esp), %ecx
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 684(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 688(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 692(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 696(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ adcl 700(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 704(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 708(%esp), %edi
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 712(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 716(%esp), %ebp
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 720(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 724(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 728(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 732(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 736(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 616(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 616(%esp), %esi
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 620(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 644(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 648(%esp), %edi
+ adcl 652(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 612(%esp), %edx
+ movl %esi, %ecx
+ addl 552(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 572(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 580(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 588(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 488(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 508(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 512(%esp), %edi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 528(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 484(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 424(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 440(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ adcl 444(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %esi, %edi
+ adcl 460(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 360(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 368(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 376(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 396(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 400(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 296(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 356(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 296(%esp), %ecx
+ adcl 300(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 308(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 332(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 336(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 232(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 240(%esp), %ebp
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 244(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 272(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 276(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 228(%esp), %edx
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 168(%esp), %ecx
+ adcl 172(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ adcl 176(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 188(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 208(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 2060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ addl 104(%esp), %edi
+ movl 68(%esp), %edi # 4-byte Reload
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 112(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ adcl 116(%esp), %edi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 120(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 124(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 128(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 132(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 140(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 144(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 148(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 152(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 156(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 160(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 164(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 2060(%esp), %ecx
+ subl (%ecx), %edx
+ movl %ebx, %ebp
+ sbbl 4(%ecx), %ebp
+ movl %edi, %ebx
+ sbbl 8(%ecx), %ebx
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 12(%ecx), %eax
+ sbbl 16(%ecx), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ sbbl 20(%ecx), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 24(%ecx), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 28(%ecx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 32(%ecx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 36(%ecx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ sbbl 40(%ecx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ sbbl 44(%ecx), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ sbbl 48(%ecx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ sbbl 52(%ecx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ sbbl 56(%ecx), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ movl 100(%esp), %ecx # 4-byte Reload
+ js .LBB227_2
+# BB#1:
+ movl %edx, %ecx
+.LBB227_2:
+ movl 2048(%esp), %edx
+ movl %ecx, (%edx)
+ movl 92(%esp), %esi # 4-byte Reload
+ js .LBB227_4
+# BB#3:
+ movl %ebp, %esi
+.LBB227_4:
+ movl %esi, 4(%edx)
+ movl 88(%esp), %ecx # 4-byte Reload
+ js .LBB227_6
+# BB#5:
+ movl %ebx, %edi
+.LBB227_6:
+ movl %edi, 8(%edx)
+ js .LBB227_8
+# BB#7:
+ movl %eax, %ecx
+.LBB227_8:
+ movl %ecx, 12(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB227_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB227_10:
+ movl %eax, 16(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB227_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB227_12:
+ movl %eax, 20(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB227_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB227_14:
+ movl %eax, 24(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB227_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB227_16:
+ movl %eax, 28(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB227_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB227_18:
+ movl %eax, 32(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB227_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB227_20:
+ movl %eax, 36(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB227_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB227_22:
+ movl %eax, 40(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB227_24
+# BB#23:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB227_24:
+ movl %eax, 44(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB227_26
+# BB#25:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB227_26:
+ movl %eax, 48(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB227_28
+# BB#27:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB227_28:
+ movl %eax, 52(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB227_30
+# BB#29:
+ movl 68(%esp), %eax # 4-byte Reload
+.LBB227_30:
+ movl %eax, 56(%edx)
+ addl $2028, %esp # imm = 0x7EC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end227:
+ .size mcl_fp_montNF15Lbmi2, .Lfunc_end227-mcl_fp_montNF15Lbmi2
+
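+# mcl_fp_montRed15Lbmi2: Montgomery reduction of a 30-limb (960-bit) input
+# against a 15-limb (480-bit) modulus. Each of the 15 rounds multiplies the
+# modulus by q = (current low limb) * n' (the word at -4 of the modulus
+# pointer) via .LmulPv480x32 and folds the product in; a final conditional
+# subtraction of the modulus yields the reduced result.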
+ .globl mcl_fp_montRed15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed15Lbmi2,@function
+mcl_fp_montRed15Lbmi2: # @mcl_fp_montRed15Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1148, %esp # imm = 0x47C
+ calll .L228$pb
+.L228$pb:
+ popl %eax
+.Ltmp49:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp49-.L228$pb), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1176(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 1172(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 80(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 116(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%ecx), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 100(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 96(%ecx), %esi
+ movl %esi, 152(%esp) # 4-byte Spill
+ movl 92(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 84(%ecx), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 80(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl %esi, 168(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 164(%esp) # 4-byte Spill
+ movl 68(%ecx), %esi
+ movl %esi, 176(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 60(%ecx), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 56(%ecx), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 140(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 44(%ecx), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 12(%ecx), %edi
+ movl 8(%ecx), %esi
+ movl (%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 56(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 1080(%esp), %ecx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 80(%esp), %eax # 4-byte Reload
+ addl 1080(%esp), %eax
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1084(%esp), %ecx
+ adcl 1088(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl 1092(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1108(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ movl 148(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1016(%esp), %esi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1020(%esp), %edx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 1060(%esp), %ebp
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, %esi
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 148(%esp) # 4-byte Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 952(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 956(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 992(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %ebp # 4-byte Reload
+ adcl 1004(%esp), %ebp
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 888(%esp), %esi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 892(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl 936(%esp), %ebp
+ movl %ebp, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 160(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 824(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 828(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 760(%esp), %esi
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 764(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ movl 152(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 696(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 152(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 632(%esp), %edi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 636(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %ebp # 4-byte Reload
+ adcl 672(%esp), %ebp
+ movl 164(%esp), %edi # 4-byte Reload
+ adcl 676(%esp), %edi
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 568(%esp), %esi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 572(%esp), %ecx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ adcl 604(%esp), %ebp
+ movl %ebp, 176(%esp) # 4-byte Spill
+ adcl 608(%esp), %edi
+ movl %edi, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %edi # 4-byte Reload
+ adcl 616(%esp), %edi
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1176(%esp), %eax
+ movl %eax, %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 504(%esp), %esi
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 508(%esp), %ecx
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %esi # 4-byte Reload
+ adcl 524(%esp), %esi
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl 548(%esp), %edi
+ movl %edi, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 440(%esp), %edi
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %edi # 4-byte Reload
+ adcl 452(%esp), %edi
+ adcl 456(%esp), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %esi # 4-byte Reload
+ adcl 464(%esp), %esi
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 376(%esp), %ebp
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 380(%esp), %ebp
+ adcl 384(%esp), %edi
+ movl %edi, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %edi # 4-byte Reload
+ adcl 392(%esp), %edi
+ adcl 396(%esp), %esi
+ movl %esi, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %esi # 4-byte Reload
+ adcl 412(%esp), %esi
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 312(%esp), %ebp
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ adcl 324(%esp), %edi
+ movl %edi, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %ecx # 4-byte Reload
+ adcl 328(%esp), %ecx
+ movl %ecx, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl %ecx, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %ecx # 4-byte Reload
+ adcl 336(%esp), %ecx
+ movl %ecx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %ecx # 4-byte Reload
+ adcl 340(%esp), %ecx
+ movl %ecx, 168(%esp) # 4-byte Spill
+ adcl 344(%esp), %esi
+ movl %esi, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %ecx # 4-byte Reload
+ adcl 348(%esp), %ecx
+ movl %ecx, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp # 4-byte Reload
+ adcl 352(%esp), %ebp
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 356(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 360(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 364(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 368(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %eax, %edi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 248(%esp), %edi
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ adcl 284(%esp), %ebp
+ movl %ebp, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 288(%esp), %edi
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 308(%esp), %ebp
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 184(%esp), %esi
+ movl 172(%esp), %edx # 4-byte Reload
+ adcl 188(%esp), %edx
+ movl %edx, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %ecx # 4-byte Reload
+ adcl 192(%esp), %ecx
+ movl %ecx, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %esi # 4-byte Reload
+ adcl 204(%esp), %esi
+ movl %esi, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl 220(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 240(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ movl %edx, %eax
+ subl 16(%esp), %edx # 4-byte Folded Reload
+ sbbl 4(%esp), %ecx # 4-byte Folded Reload
+ movl 176(%esp), %eax # 4-byte Reload
+ sbbl 8(%esp), %eax # 4-byte Folded Reload
+ movl 164(%esp), %ebp # 4-byte Reload
+ sbbl 12(%esp), %ebp # 4-byte Folded Reload
+ sbbl 20(%esp), %esi # 4-byte Folded Reload
+ movl 144(%esp), %edi # 4-byte Reload
+ sbbl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 160(%esp), %edi # 4-byte Reload
+ sbbl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 148(%esp), %edi # 4-byte Reload
+ sbbl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ sbbl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 152(%esp), %edi # 4-byte Reload
+ sbbl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ sbbl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 112(%esp) # 4-byte Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ sbbl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ sbbl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ sbbl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 140(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ sbbl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 156(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ movl %ebx, %edi
+ jne .LBB228_2
+# BB#1:
+ movl %edx, 172(%esp) # 4-byte Spill
+.LBB228_2:
+ movl 1168(%esp), %edx
+ movl 172(%esp), %ebx # 4-byte Reload
+ movl %ebx, (%edx)
+ movl %edi, %ebx
+ testb %bl, %bl
+ jne .LBB228_4
+# BB#3:
+ movl %ecx, 180(%esp) # 4-byte Spill
+.LBB228_4:
+ movl 180(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%edx)
+ movl 176(%esp), %ecx # 4-byte Reload
+ jne .LBB228_6
+# BB#5:
+ movl %eax, %ecx
+.LBB228_6:
+ movl %ecx, 8(%edx)
+ movl 164(%esp), %eax # 4-byte Reload
+ jne .LBB228_8
+# BB#7:
+ movl %ebp, %eax
+.LBB228_8:
+ movl %eax, 12(%edx)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl 148(%esp), %eax # 4-byte Reload
+ movl 168(%esp), %ebp # 4-byte Reload
+ jne .LBB228_10
+# BB#9:
+ movl %esi, %ebp
+.LBB228_10:
+ movl %ebp, 16(%edx)
+ movl 152(%esp), %ebp # 4-byte Reload
+ movl 144(%esp), %ebx # 4-byte Reload
+ jne .LBB228_12
+# BB#11:
+ movl 84(%esp), %ebx # 4-byte Reload
+.LBB228_12:
+ movl %ebx, 20(%edx)
+ movl 132(%esp), %ebx # 4-byte Reload
+ movl 160(%esp), %edi # 4-byte Reload
+ jne .LBB228_14
+# BB#13:
+ movl 88(%esp), %edi # 4-byte Reload
+.LBB228_14:
+ movl %edi, 24(%edx)
+ movl 128(%esp), %edi # 4-byte Reload
+ jne .LBB228_16
+# BB#15:
+ movl 92(%esp), %eax # 4-byte Reload
+.LBB228_16:
+ movl %eax, 28(%edx)
+ movl 116(%esp), %esi # 4-byte Reload
+ jne .LBB228_18
+# BB#17:
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, 136(%esp) # 4-byte Spill
+.LBB228_18:
+ movl 136(%esp), %eax # 4-byte Reload
+ movl %eax, 32(%edx)
+ jne .LBB228_20
+# BB#19:
+ movl 100(%esp), %ebp # 4-byte Reload
+.LBB228_20:
+ movl %ebp, 36(%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB228_22
+# BB#21:
+ movl 112(%esp), %ebx # 4-byte Reload
+.LBB228_22:
+ movl %ebx, 40(%edx)
+ jne .LBB228_24
+# BB#23:
+ movl 120(%esp), %edi # 4-byte Reload
+.LBB228_24:
+ movl %edi, 44(%edx)
+ jne .LBB228_26
+# BB#25:
+ movl 124(%esp), %esi # 4-byte Reload
+.LBB228_26:
+ movl %esi, 48(%edx)
+ jne .LBB228_28
+# BB#27:
+ movl 140(%esp), %eax # 4-byte Reload
+.LBB228_28:
+ movl %eax, 52(%edx)
+ jne .LBB228_30
+# BB#29:
+ movl 156(%esp), %ecx # 4-byte Reload
+.LBB228_30:
+ movl %ecx, 56(%edx)
+ addl $1148, %esp # imm = 0x47C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end228:
+ .size mcl_fp_montRed15Lbmi2, .Lfunc_end228-mcl_fp_montRed15Lbmi2
+
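+# mcl_fp_addPre15Lbmi2: 15-limb (480-bit) addition with no modular reduction;
+# the carry out of the top limb is returned in %eax.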
+ .globl mcl_fp_addPre15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre15Lbmi2,@function
+mcl_fp_addPre15Lbmi2: # @mcl_fp_addPre15Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %ebx
+ adcl 8(%ecx), %ebx
+ movl 16(%esp), %edi
+ movl %edx, (%edi)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%edi)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %ebx, 8(%edi)
+ movl 20(%eax), %ebx
+ movl %edx, 12(%edi)
+ movl 20(%ecx), %edx
+ adcl %ebx, %edx
+ movl 24(%eax), %ebx
+ movl %esi, 16(%edi)
+ movl 24(%ecx), %esi
+ adcl %ebx, %esi
+ movl 28(%eax), %ebx
+ movl %edx, 20(%edi)
+ movl 28(%ecx), %edx
+ adcl %ebx, %edx
+ movl 32(%eax), %ebx
+ movl %esi, 24(%edi)
+ movl 32(%ecx), %esi
+ adcl %ebx, %esi
+ movl 36(%eax), %ebx
+ movl %edx, 28(%edi)
+ movl 36(%ecx), %edx
+ adcl %ebx, %edx
+ movl 40(%eax), %ebx
+ movl %esi, 32(%edi)
+ movl 40(%ecx), %esi
+ adcl %ebx, %esi
+ movl 44(%eax), %ebx
+ movl %edx, 36(%edi)
+ movl 44(%ecx), %edx
+ adcl %ebx, %edx
+ movl 48(%eax), %ebx
+ movl %esi, 40(%edi)
+ movl 48(%ecx), %esi
+ adcl %ebx, %esi
+ movl 52(%eax), %ebx
+ movl %edx, 44(%edi)
+ movl 52(%ecx), %edx
+ adcl %ebx, %edx
+ movl %esi, 48(%edi)
+ movl %edx, 52(%edi)
+ movl 56(%eax), %eax
+ movl 56(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 56(%edi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end229:
+ .size mcl_fp_addPre15Lbmi2, .Lfunc_end229-mcl_fp_addPre15Lbmi2
+
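+# mcl_fp_subPre15Lbmi2: 15-limb subtraction with no modular reduction;
+# the borrow out of the top limb is returned in %eax.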
+ .globl mcl_fp_subPre15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre15Lbmi2,@function
+mcl_fp_subPre15Lbmi2: # @mcl_fp_subPre15Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebp
+ sbbl 8(%edx), %ebp
+ movl 20(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebx)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebp, 8(%ebx)
+ movl 20(%edx), %ebp
+ movl %esi, 12(%ebx)
+ movl 20(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 24(%edx), %ebp
+ movl %edi, 16(%ebx)
+ movl 24(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 28(%edx), %ebp
+ movl %esi, 20(%ebx)
+ movl 28(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 32(%edx), %ebp
+ movl %edi, 24(%ebx)
+ movl 32(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 36(%edx), %ebp
+ movl %esi, 28(%ebx)
+ movl 36(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 40(%edx), %ebp
+ movl %edi, 32(%ebx)
+ movl 40(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 44(%edx), %ebp
+ movl %esi, 36(%ebx)
+ movl 44(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 48(%edx), %ebp
+ movl %edi, 40(%ebx)
+ movl 48(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 52(%edx), %ebp
+ movl %esi, 44(%ebx)
+ movl 52(%ecx), %esi
+ sbbl %ebp, %esi
+ movl %edi, 48(%ebx)
+ movl %esi, 52(%ebx)
+ movl 56(%edx), %edx
+ movl 56(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 56(%ebx)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end230:
+ .size mcl_fp_subPre15Lbmi2, .Lfunc_end230-mcl_fp_subPre15Lbmi2
+
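+# mcl_fp_shr1_15Lbmi2: logical right shift by one of a 15-limb value,
+# propagating bits between adjacent limbs with shrdl.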
+ .globl mcl_fp_shr1_15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_15Lbmi2,@function
+mcl_fp_shr1_15Lbmi2: # @mcl_fp_shr1_15Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 48(%ecx)
+ movl 56(%eax), %eax
+ shrdl $1, %eax, %esi
+ movl %esi, 52(%ecx)
+ shrl %eax
+ movl %eax, 56(%ecx)
+ popl %esi
+ retl
+.Lfunc_end231:
+ .size mcl_fp_shr1_15Lbmi2, .Lfunc_end231-mcl_fp_shr1_15Lbmi2
+
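+# mcl_fp_add15Lbmi2: modular addition. The raw sum a+b is stored first; the
+# modulus is then subtracted and, if that subtraction does not borrow
+# (the "nocarry" path), the reduced value overwrites the stored sum.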
+ .globl mcl_fp_add15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add15Lbmi2,@function
+mcl_fp_add15Lbmi2: # @mcl_fp_add15Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $48, %esp
+ movl 76(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 72(%esp), %eax
+ addl (%eax), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ adcl 4(%eax), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 8(%ecx), %edx
+ adcl 8(%eax), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 12(%eax), %esi
+ movl 16(%eax), %edx
+ adcl 12(%ecx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 16(%ecx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 20(%eax), %edx
+ adcl 20(%ecx), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 24(%eax), %edx
+ adcl 24(%ecx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 28(%eax), %edx
+ adcl 28(%ecx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 32(%eax), %edx
+ adcl 32(%ecx), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 36(%eax), %edx
+ adcl 36(%ecx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 40(%eax), %edx
+ adcl 40(%ecx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 44(%eax), %ebx
+ adcl 44(%ecx), %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl 48(%eax), %ebp
+ adcl 48(%ecx), %ebp
+ movl 52(%eax), %edi
+ adcl 52(%ecx), %edi
+ movl 56(%eax), %edx
+ adcl 56(%ecx), %edx
+ movl 68(%esp), %ecx
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ecx)
+ movl 44(%esp), %esi # 4-byte Reload
+ movl %esi, 4(%ecx)
+ movl 40(%esp), %esi # 4-byte Reload
+ movl %esi, 8(%ecx)
+ movl 36(%esp), %esi # 4-byte Reload
+ movl %esi, 12(%ecx)
+ movl 32(%esp), %esi # 4-byte Reload
+ movl %esi, 16(%ecx)
+ movl 28(%esp), %esi # 4-byte Reload
+ movl %esi, 20(%ecx)
+ movl 24(%esp), %esi # 4-byte Reload
+ movl %esi, 24(%ecx)
+ movl 20(%esp), %esi # 4-byte Reload
+ movl %esi, 28(%ecx)
+ movl 16(%esp), %esi # 4-byte Reload
+ movl %esi, 32(%ecx)
+ movl 12(%esp), %esi # 4-byte Reload
+ movl %esi, 36(%ecx)
+ movl 8(%esp), %esi # 4-byte Reload
+ movl %esi, 40(%ecx)
+ movl %ebx, 44(%ecx)
+ movl %ebp, 48(%ecx)
+ movl %edi, 52(%ecx)
+ movl %edx, 56(%ecx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 80(%esp), %esi
+ subl (%esi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 4(%esi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ sbbl 8(%esi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ sbbl 12(%esi), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ sbbl 16(%esi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ sbbl 20(%esi), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ sbbl 24(%esi), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %edx # 4-byte Reload
+ sbbl 28(%esi), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %edx # 4-byte Reload
+ sbbl 32(%esi), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %edx # 4-byte Reload
+ sbbl 36(%esi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %edx # 4-byte Reload
+ sbbl 40(%esi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl (%esp), %edx # 4-byte Reload
+ sbbl 44(%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ sbbl 48(%esi), %ebp
+ sbbl 52(%esi), %edi
+ sbbl 56(%esi), %eax
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB232_2
+# BB#1: # %nocarry
+ movl 4(%esp), %edx # 4-byte Reload
+ movl %edx, (%ecx)
+ movl 44(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%ecx)
+ movl 40(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%ecx)
+ movl 36(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%ecx)
+ movl 32(%esp), %edx # 4-byte Reload
+ movl %edx, 16(%ecx)
+ movl 28(%esp), %edx # 4-byte Reload
+ movl %edx, 20(%ecx)
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 24(%ecx)
+ movl 20(%esp), %edx # 4-byte Reload
+ movl %edx, 28(%ecx)
+ movl 16(%esp), %edx # 4-byte Reload
+ movl %edx, 32(%ecx)
+ movl 12(%esp), %edx # 4-byte Reload
+ movl %edx, 36(%ecx)
+ movl 8(%esp), %edx # 4-byte Reload
+ movl %edx, 40(%ecx)
+ movl (%esp), %edx # 4-byte Reload
+ movl %edx, 44(%ecx)
+ movl %ebp, 48(%ecx)
+ movl %edi, 52(%ecx)
+ movl %eax, 56(%ecx)
+.LBB232_2: # %carry
+ addl $48, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end232:
+ .size mcl_fp_add15Lbmi2, .Lfunc_end232-mcl_fp_add15Lbmi2
+
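+# mcl_fp_addNF15Lbmi2: "NF" variant of modular addition: computes a+b and
+# a+b-p, then selects between them limb by limb based on the sign of the
+# topmost word of the difference rather than on a carry flag.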
+ .globl mcl_fp_addNF15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF15Lbmi2,@function
+mcl_fp_addNF15Lbmi2: # @mcl_fp_addNF15Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $120, %esp
+ movl 148(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %edx
+ movl 144(%esp), %esi
+ addl (%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 4(%esi), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 52(%ecx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 44(%ecx), %ebp
+ movl 40(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ movl 20(%ecx), %ebx
+ movl 16(%ecx), %edi
+ movl 12(%ecx), %edx
+ movl 8(%ecx), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 12(%esi), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebx
+ movl %ebx, 72(%esp) # 4-byte Spill
+ adcl 24(%esi), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 28(%esi), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 32(%esi), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esi), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 40(%esi), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 44(%esi), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 48(%esi), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 52(%esi), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 56(%esi), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 152(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ subl (%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ sbbl 4(%esi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 8(%esi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ sbbl 16(%esi), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 20(%esi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 24(%esi), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ sbbl 28(%esi), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ sbbl 32(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 36(%esi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ sbbl 40(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ movl %edx, %eax
+ sbbl 44(%esi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, %edi
+ sbbl 48(%esi), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edi
+ movl %ecx, %ebx
+ sbbl 52(%esi), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edi
+ sbbl 56(%esi), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl %edi, %esi
+ sarl $31, %esi
+ testl %esi, %esi
+ movl 80(%esp), %esi # 4-byte Reload
+ js .LBB233_2
+# BB#1:
+ movl (%esp), %esi # 4-byte Reload
+.LBB233_2:
+ movl 140(%esp), %edi
+ movl %esi, (%edi)
+ movl 84(%esp), %ecx # 4-byte Reload
+ js .LBB233_4
+# BB#3:
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB233_4:
+ movl %ecx, 4(%edi)
+ movl 104(%esp), %ecx # 4-byte Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ js .LBB233_6
+# BB#5:
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+.LBB233_6:
+ movl 76(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%edi)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB233_8
+# BB#7:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB233_8:
+ movl %eax, 12(%edi)
+ movl %ebx, %ebp
+ movl %edx, %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ js .LBB233_10
+# BB#9:
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB233_10:
+ movl %edx, 16(%edi)
+ movl 112(%esp), %edx # 4-byte Reload
+ movl 108(%esp), %ebx # 4-byte Reload
+ js .LBB233_12
+# BB#11:
+ movl 20(%esp), %esi # 4-byte Reload
+.LBB233_12:
+ movl %esi, 20(%edi)
+ js .LBB233_14
+# BB#13:
+ movl 24(%esp), %esi # 4-byte Reload
+ movl %esi, 88(%esp) # 4-byte Spill
+.LBB233_14:
+ movl 88(%esp), %esi # 4-byte Reload
+ movl %esi, 24(%edi)
+ js .LBB233_16
+# BB#15:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB233_16:
+ movl %ecx, 28(%edi)
+ js .LBB233_18
+# BB#17:
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 116(%esp) # 4-byte Spill
+.LBB233_18:
+ movl 116(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%edi)
+ js .LBB233_20
+# BB#19:
+ movl 36(%esp), %ebx # 4-byte Reload
+.LBB233_20:
+ movl %ebx, 36(%edi)
+ js .LBB233_22
+# BB#21:
+ movl 40(%esp), %edx # 4-byte Reload
+.LBB233_22:
+ movl %edx, 40(%edi)
+ js .LBB233_24
+# BB#23:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB233_24:
+ movl %eax, 44(%edi)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB233_26
+# BB#25:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB233_26:
+ movl %eax, 48(%edi)
+ js .LBB233_28
+# BB#27:
+ movl 52(%esp), %ebp # 4-byte Reload
+.LBB233_28:
+ movl %ebp, 52(%edi)
+ movl 100(%esp), %eax # 4-byte Reload
+ js .LBB233_30
+# BB#29:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB233_30:
+ movl %eax, 56(%edi)
+ addl $120, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end233:
+ .size mcl_fp_addNF15Lbmi2, .Lfunc_end233-mcl_fp_addNF15Lbmi2
+
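+# mcl_fp_sub15Lbmi2 (summary): 15-limb (480-bit) subtraction z = x - y;
+# when the subtraction borrows, the %carry block adds the modulus p back so
+# the result stays in [0, p). The (z, x, y, p) argument order is assumed
+# from the usual mcl convention of passing the output pointer first.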
+ .globl mcl_fp_sub15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub15Lbmi2,@function
+mcl_fp_sub15Lbmi2: # @mcl_fp_sub15Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 80(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 84(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ sbbl 32(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 40(%esi), %edx
+ sbbl 40(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 44(%esi), %ecx
+ sbbl 44(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 48(%esi), %eax
+ sbbl 48(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 52(%esi), %ebp
+ sbbl 52(%edi), %ebp
+ movl 56(%esi), %esi
+ sbbl 56(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 76(%esp), %ebx
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%ebx)
+ movl %edx, 40(%ebx)
+ movl %ecx, 44(%ebx)
+ movl %eax, 48(%ebx)
+ movl %ebp, 52(%ebx)
+ movl %esi, 56(%ebx)
+ je .LBB234_2
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 88(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 8(%esi), %edi
+ movl 12(%esi), %eax
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl 44(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%ebx)
+ movl 48(%esi), %ecx
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%ebx)
+ movl %ecx, 48(%ebx)
+ movl 52(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 52(%ebx)
+ movl 56(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%ebx)
+.LBB234_2: # %nocarry
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end234:
+ .size mcl_fp_sub15Lbmi2, .Lfunc_end234-mcl_fp_sub15Lbmi2
+
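+# mcl_fp_subNF15Lbmi2 (summary): branch-free variant of the 15-limb
+# subtraction. All limbs are subtracted, the sign of the top limb is turned
+# into an all-ones/all-zeros mask (sarl $31), each modulus limb is ANDed
+# with that mask, and the masked modulus is added back.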
+ .globl mcl_fp_subNF15Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF15Lbmi2,@function
+mcl_fp_subNF15Lbmi2: # @mcl_fp_subNF15Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $96, %esp
+ movl 120(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 124(%esp), %edi
+ subl (%edi), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ sbbl 4(%edi), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 52(%ecx), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 32(%ecx), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 32(%edi), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 52(%edi), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 56(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ sarl $31, %ebp
+ movl %ebp, %edi
+ shldl $1, %eax, %edi
+ movl 128(%esp), %edx
+ andl (%edx), %edi
+ movl 56(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 48(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 44(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 40(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 36(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 20(%edx), %ebx
+ andl %ebp, %ebx
+ movl 16(%edx), %esi
+ andl %ebp, %esi
+ movl 12(%edx), %ecx
+ andl %ebp, %ecx
+ movl 8(%edx), %eax
+ andl %ebp, %eax
+ andl 4(%edx), %ebp
+ addl 60(%esp), %edi # 4-byte Folded Reload
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl 116(%esp), %edx
+ movl %edi, (%edx)
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 4(%edx)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 8(%edx)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, 12(%edx)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 16(%edx)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 20(%edx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%edx)
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 92(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%edx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%edx)
+ movl 16(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%edx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%edx)
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%edx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 48(%edx)
+ movl %eax, 52(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%edx)
+ addl $96, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end235:
+ .size mcl_fp_subNF15Lbmi2, .Lfunc_end235-mcl_fp_subNF15Lbmi2
+
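+# mcl_fpDbl_add15Lbmi2 (summary): adds two 30-limb (double-width) operands.
+# The low 15 limbs are written out directly; the high 15 limbs are reduced
+# by subtracting p and keeping either the raw or the reduced value
+# according to the final borrow.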
+ .globl mcl_fpDbl_add15Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add15Lbmi2,@function
+mcl_fpDbl_add15Lbmi2: # @mcl_fpDbl_add15Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+ movl 136(%esp), %ecx
+ movl 132(%esp), %edx
+ movl 12(%edx), %edi
+ movl 16(%edx), %esi
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%edx), %ebp
+ movl 128(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%edx), %ebp
+ adcl 8(%edx), %ebx
+ adcl 12(%ecx), %edi
+ adcl 16(%ecx), %esi
+ movl %ebp, 4(%eax)
+ movl 68(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %edi, 12(%eax)
+ movl 20(%edx), %edi
+ adcl %ebx, %edi
+ movl 24(%ecx), %ebx
+ movl %esi, 16(%eax)
+ movl 24(%edx), %esi
+ adcl %ebx, %esi
+ movl 28(%ecx), %ebx
+ movl %edi, 20(%eax)
+ movl 28(%edx), %edi
+ adcl %ebx, %edi
+ movl 32(%ecx), %ebx
+ movl %esi, 24(%eax)
+ movl 32(%edx), %esi
+ adcl %ebx, %esi
+ movl 36(%ecx), %ebx
+ movl %edi, 28(%eax)
+ movl 36(%edx), %edi
+ adcl %ebx, %edi
+ movl 40(%ecx), %ebx
+ movl %esi, 32(%eax)
+ movl 40(%edx), %esi
+ adcl %ebx, %esi
+ movl 44(%ecx), %ebx
+ movl %edi, 36(%eax)
+ movl 44(%edx), %edi
+ adcl %ebx, %edi
+ movl 48(%ecx), %ebx
+ movl %esi, 40(%eax)
+ movl 48(%edx), %esi
+ adcl %ebx, %esi
+ movl 52(%ecx), %ebx
+ movl %edi, 44(%eax)
+ movl 52(%edx), %edi
+ adcl %ebx, %edi
+ movl 56(%ecx), %ebx
+ movl %esi, 48(%eax)
+ movl 56(%edx), %esi
+ adcl %ebx, %esi
+ movl 60(%ecx), %ebx
+ movl %edi, 52(%eax)
+ movl 60(%edx), %edi
+ adcl %ebx, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 64(%ecx), %edi
+ movl %esi, 56(%eax)
+ movl 64(%edx), %eax
+ adcl %edi, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%edx), %eax
+ adcl %ebp, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl 72(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl 76(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%ecx), %esi
+ movl 80(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl 84(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%ecx), %esi
+ movl 88(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%ecx), %esi
+ movl 92(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%ecx), %esi
+ movl 96(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%ecx), %esi
+ movl 100(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%ecx), %eax
+ movl 104(%edx), %esi
+ adcl %eax, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 108(%ecx), %edi
+ movl 108(%edx), %eax
+ adcl %edi, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 112(%ecx), %ebx
+ movl 112(%edx), %edi
+ adcl %ebx, %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 116(%ecx), %ecx
+ movl 116(%edx), %edx
+ adcl %ecx, %edx
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 140(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ subl (%ebp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 40(%ebp), %ecx
+ sbbl 44(%ebp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ sbbl 48(%ebp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl %edx, %edi
+ sbbl 52(%ebp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %esi
+ sbbl 56(%ebp), %esi
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB236_2
+# BB#1:
+ movl %esi, %edi
+.LBB236_2:
+ testb %bl, %bl
+ movl 76(%esp), %eax # 4-byte Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ movl 68(%esp), %ebx # 4-byte Reload
+ movl 64(%esp), %ebp # 4-byte Reload
+ jne .LBB236_4
+# BB#3:
+ movl %ecx, %esi
+ movl (%esp), %ebx # 4-byte Reload
+ movl 4(%esp), %ebp # 4-byte Reload
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB236_4:
+ movl 128(%esp), %edx
+ movl %eax, 60(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, 64(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl %eax, 68(%edx)
+ movl 88(%esp), %eax # 4-byte Reload
+ movl %eax, 72(%edx)
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%edx)
+ movl 100(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ movl %eax, 88(%edx)
+ movl %ebp, 92(%edx)
+ movl %ebx, 96(%edx)
+ movl %esi, 100(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ jne .LBB236_6
+# BB#5:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB236_6:
+ movl %eax, 104(%edx)
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ jne .LBB236_8
+# BB#7:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB236_8:
+ movl %eax, 108(%edx)
+ jne .LBB236_10
+# BB#9:
+ movl 48(%esp), %ecx # 4-byte Reload
+.LBB236_10:
+ movl %ecx, 112(%edx)
+ movl %edi, 116(%edx)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end236:
+ .size mcl_fpDbl_add15Lbmi2, .Lfunc_end236-mcl_fpDbl_add15Lbmi2
+
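+# mcl_fpDbl_sub15Lbmi2 (summary): subtracts two 30-limb (double-width)
+# operands. The low 15 limbs are stored as computed; if the overall
+# subtraction borrows, the modulus p (otherwise zero) is added into the
+# high 15 limbs via the per-limb selections that follow.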
+ .globl mcl_fpDbl_sub15Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub15Lbmi2,@function
+mcl_fpDbl_sub15Lbmi2: # @mcl_fpDbl_sub15Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 124(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 128(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %esi
+ movl 8(%eax), %edi
+ sbbl 8(%ebp), %edi
+ movl 120(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 12(%eax), %edx
+ sbbl 12(%ebp), %edx
+ movl %esi, 4(%ecx)
+ movl 16(%eax), %esi
+ sbbl 16(%ebp), %esi
+ movl %edi, 8(%ecx)
+ movl 20(%ebp), %edi
+ movl %edx, 12(%ecx)
+ movl 20(%eax), %edx
+ sbbl %edi, %edx
+ movl 24(%ebp), %edi
+ movl %esi, 16(%ecx)
+ movl 24(%eax), %esi
+ sbbl %edi, %esi
+ movl 28(%ebp), %edi
+ movl %edx, 20(%ecx)
+ movl 28(%eax), %edx
+ sbbl %edi, %edx
+ movl 32(%ebp), %edi
+ movl %esi, 24(%ecx)
+ movl 32(%eax), %esi
+ sbbl %edi, %esi
+ movl 36(%ebp), %edi
+ movl %edx, 28(%ecx)
+ movl 36(%eax), %edx
+ sbbl %edi, %edx
+ movl 40(%ebp), %edi
+ movl %esi, 32(%ecx)
+ movl 40(%eax), %esi
+ sbbl %edi, %esi
+ movl 44(%ebp), %edi
+ movl %edx, 36(%ecx)
+ movl 44(%eax), %edx
+ sbbl %edi, %edx
+ movl 48(%ebp), %edi
+ movl %esi, 40(%ecx)
+ movl 48(%eax), %esi
+ sbbl %edi, %esi
+ movl 52(%ebp), %edi
+ movl %edx, 44(%ecx)
+ movl 52(%eax), %edx
+ sbbl %edi, %edx
+ movl 56(%ebp), %edi
+ movl %esi, 48(%ecx)
+ movl 56(%eax), %esi
+ sbbl %edi, %esi
+ movl 60(%ebp), %edi
+ movl %edx, 52(%ecx)
+ movl 60(%eax), %edx
+ sbbl %edi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 64(%ebp), %edx
+ movl %esi, 56(%ecx)
+ movl 64(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 68(%ebp), %edx
+ movl 68(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 72(%ebp), %edx
+ movl 72(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 76(%ebp), %edx
+ movl 76(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 80(%ebp), %edx
+ movl 80(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 84(%ebp), %edx
+ movl 84(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 88(%ebp), %edx
+ movl 88(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 92(%ebp), %edx
+ movl 92(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 96(%ebp), %edx
+ movl 96(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 100(%ebp), %edx
+ movl 100(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 104(%ebp), %edx
+ movl 104(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 108(%ebp), %edx
+ movl 108(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 112(%ebp), %edx
+ movl 112(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 116(%ebp), %edx
+ movl 116(%eax), %eax
+ sbbl %edx, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 132(%esp), %esi
+ jne .LBB237_1
+# BB#2:
+ movl $0, 60(%esp) # 4-byte Folded Spill
+ jmp .LBB237_3
+.LBB237_1:
+ movl 56(%esi), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+.LBB237_3:
+ testb %al, %al
+ jne .LBB237_4
+# BB#5:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ movl $0, %ebx
+ jmp .LBB237_6
+.LBB237_4:
+ movl (%esi), %ebx
+ movl 4(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB237_6:
+ jne .LBB237_7
+# BB#8:
+ movl $0, 32(%esp) # 4-byte Folded Spill
+ jmp .LBB237_9
+.LBB237_7:
+ movl 52(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+.LBB237_9:
+ jne .LBB237_10
+# BB#11:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ jmp .LBB237_12
+.LBB237_10:
+ movl 48(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+.LBB237_12:
+ jne .LBB237_13
+# BB#14:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB237_15
+.LBB237_13:
+ movl 44(%esi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB237_15:
+ jne .LBB237_16
+# BB#17:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB237_18
+.LBB237_16:
+ movl 40(%esi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB237_18:
+ jne .LBB237_19
+# BB#20:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB237_21
+.LBB237_19:
+ movl 36(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB237_21:
+ jne .LBB237_22
+# BB#23:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB237_24
+.LBB237_22:
+ movl 32(%esi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB237_24:
+ jne .LBB237_25
+# BB#26:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB237_27
+.LBB237_25:
+ movl 28(%esi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB237_27:
+ jne .LBB237_28
+# BB#29:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB237_30
+.LBB237_28:
+ movl 24(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB237_30:
+ jne .LBB237_31
+# BB#32:
+ movl $0, %edx
+ jmp .LBB237_33
+.LBB237_31:
+ movl 20(%esi), %edx
+.LBB237_33:
+ jne .LBB237_34
+# BB#35:
+ movl $0, %ebp
+ jmp .LBB237_36
+.LBB237_34:
+ movl 16(%esi), %ebp
+.LBB237_36:
+ jne .LBB237_37
+# BB#38:
+ movl $0, %eax
+ jmp .LBB237_39
+.LBB237_37:
+ movl 12(%esi), %eax
+.LBB237_39:
+ jne .LBB237_40
+# BB#41:
+ xorl %esi, %esi
+ jmp .LBB237_42
+.LBB237_40:
+ movl 8(%esi), %esi
+.LBB237_42:
+ addl 44(%esp), %ebx # 4-byte Folded Reload
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 60(%ecx)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 64(%ecx)
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 68(%ecx)
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, 76(%ecx)
+ movl (%esp), %esi # 4-byte Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 80(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 84(%ecx)
+ movl 8(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 88(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 96(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 100(%ecx)
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 104(%ecx)
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 108(%ecx)
+ movl %eax, 112(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%ecx)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end237:
+ .size mcl_fpDbl_sub15Lbmi2, .Lfunc_end237-mcl_fpDbl_sub15Lbmi2
+
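+# .LmulPv512x32 (summary): local helper multiplying a 16-limb (512-bit)
+# operand by a 32-bit value with BMI2 mulx. The operand pointer arrives in
+# %edx, the result pointer in %ecx, the 32-bit multiplier on the stack, and
+# the 17-limb product is written through %ecx.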
+ .align 16, 0x90
+ .type .LmulPv512x32,@function
+.LmulPv512x32: # @mulPv512x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl %edx, %eax
+ movl 76(%esp), %edi
+ movl %edi, %edx
+ mulxl 4(%eax), %ebx, %esi
+ movl %edi, %edx
+ mulxl (%eax), %ebp, %edx
+ movl %ebp, 52(%esp) # 4-byte Spill
+ addl %ebx, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 8(%eax), %edx, %ebx
+ adcl %esi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 12(%eax), %edx, %esi
+ adcl %ebx, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 16(%eax), %edx, %ebx
+ adcl %esi, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 20(%eax), %edx, %esi
+ adcl %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 24(%eax), %edx, %ebx
+ adcl %esi, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 28(%eax), %edx, %esi
+ adcl %ebx, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 32(%eax), %edx, %ebx
+ adcl %esi, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 36(%eax), %edx, %esi
+ adcl %ebx, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 40(%eax), %edx, %ebx
+ adcl %esi, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 44(%eax), %edx, %esi
+ adcl %ebx, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %edi, %edx
+ mulxl 48(%eax), %ebx, %ebp
+ adcl %esi, %ebx
+ movl %edi, %edx
+ mulxl 52(%eax), %esi, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl %ebp, %esi
+ movl %edi, %edx
+ mulxl 56(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 44(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%ecx)
+ movl 36(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%ecx)
+ movl 32(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%ecx)
+ movl 28(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%ecx)
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%ecx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%ecx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%ecx)
+ movl %ebx, 48(%ecx)
+ movl %esi, 52(%ecx)
+ movl %edx, 56(%ecx)
+ movl %edi, %edx
+ mulxl 60(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%ecx)
+ adcl $0, %edx
+ movl %edx, 64(%ecx)
+ movl %ecx, %eax
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end238:
+ .size .LmulPv512x32, .Lfunc_end238-.LmulPv512x32
+
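+# mcl_fp_mulUnitPre16Lbmi2 (summary): multiplies a 16-limb operand by a
+# single 32-bit unit by delegating to .LmulPv512x32 and copying the
+# resulting 17 limbs to the destination buffer.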
+ .globl mcl_fp_mulUnitPre16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre16Lbmi2,@function
+mcl_fp_mulUnitPre16Lbmi2: # @mcl_fp_mulUnitPre16Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $124, %esp
+ calll .L239$pb
+.L239$pb:
+ popl %ebx
+.Ltmp50:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp50-.L239$pb), %ebx
+ movl 152(%esp), %eax
+ movl %eax, (%esp)
+ leal 56(%esp), %ecx
+ movl 148(%esp), %edx
+ calll .LmulPv512x32
+ movl 120(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 108(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 104(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp
+ movl 72(%esp), %ebx
+ movl 68(%esp), %edi
+ movl 64(%esp), %esi
+ movl 56(%esp), %edx
+ movl 60(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 64(%eax)
+ addl $124, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end239:
+ .size mcl_fp_mulUnitPre16Lbmi2, .Lfunc_end239-mcl_fp_mulUnitPre16Lbmi2
+
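+# mcl_fpDbl_mulPre16Lbmi2 (summary): full 16x16-limb product built with one
+# level of Karatsuba: mcl_fpDbl_mulPre8Lbmi2 on the low halves, on the high
+# halves, and on the half-sums, followed by the add/subtract fix-up of the
+# middle limbs.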
+ .globl mcl_fpDbl_mulPre16Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre16Lbmi2,@function
+mcl_fpDbl_mulPre16Lbmi2: # @mcl_fpDbl_mulPre16Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $300, %esp # imm = 0x12C
+ calll .L240$pb
+.L240$pb:
+ popl %ebx
+.Ltmp51:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp51-.L240$pb), %ebx
+ movl %ebx, -224(%ebp) # 4-byte Spill
+ movl 16(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl 12(%ebp), %esi
+ movl %esi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre8Lbmi2@PLT
+ leal 32(%edi), %eax
+ movl %eax, 8(%esp)
+ leal 32(%esi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 64(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre8Lbmi2@PLT
+ movl 52(%esi), %ebx
+ movl 48(%esi), %eax
+ movl 44(%esi), %ecx
+ movl 40(%esi), %edx
+ movl %edx, -176(%ebp) # 4-byte Spill
+ movl (%esi), %edi
+ movl 4(%esi), %edx
+ addl 32(%esi), %edi
+ movl %edi, -184(%ebp) # 4-byte Spill
+ movl %esi, %edi
+ adcl 36(%edi), %edx
+ movl %edx, -236(%ebp) # 4-byte Spill
+ movl -176(%ebp), %edx # 4-byte Reload
+ adcl 8(%edi), %edx
+ movl %edx, -176(%ebp) # 4-byte Spill
+ adcl 12(%edi), %ecx
+ movl %ecx, -232(%ebp) # 4-byte Spill
+ adcl 16(%edi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ adcl 20(%edi), %ebx
+ movl %ebx, -228(%ebp) # 4-byte Spill
+ movl 56(%edi), %eax
+ adcl 24(%edi), %eax
+ movl %eax, -248(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %ecx
+ popl %eax
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl (%esi), %ecx
+ addl 32(%esi), %ecx
+ movl %ecx, -188(%ebp) # 4-byte Spill
+ movl 4(%esi), %ecx
+ adcl 36(%esi), %ecx
+ movl %ecx, -192(%ebp) # 4-byte Spill
+ movl 40(%esi), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, -196(%ebp) # 4-byte Spill
+ movl 44(%esi), %ecx
+ adcl 12(%esi), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ movl 48(%esi), %ecx
+ adcl 16(%esi), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ movl 52(%esi), %ecx
+ adcl 20(%esi), %ecx
+ movl %ecx, -208(%ebp) # 4-byte Spill
+ movl 56(%esi), %edx
+ adcl 24(%esi), %edx
+ movl 60(%esi), %ecx
+ adcl 28(%esi), %ecx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %ebx
+ popl %eax
+ movl %ebx, -252(%ebp) # 4-byte Spill
+ movl -212(%ebp), %ebx # 4-byte Reload
+ movl -176(%ebp), %esi # 4-byte Reload
+ movl %esi, -216(%ebp) # 4-byte Spill
+ movl -184(%ebp), %esi # 4-byte Reload
+ movl %esi, -220(%ebp) # 4-byte Spill
+ jb .LBB240_2
+# BB#1:
+ xorl %eax, %eax
+ xorl %ebx, %ebx
+ movl $0, -216(%ebp) # 4-byte Folded Spill
+ movl $0, -220(%ebp) # 4-byte Folded Spill
+.LBB240_2:
+ movl %ebx, -244(%ebp) # 4-byte Spill
+ movl %eax, -240(%ebp) # 4-byte Spill
+ movl 60(%edi), %eax
+ movl -144(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 28(%edi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl %edx, -144(%ebp) # 4-byte Spill
+ movl -208(%ebp), %eax # 4-byte Reload
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl -204(%ebp), %eax # 4-byte Reload
+ movl %eax, -152(%ebp) # 4-byte Spill
+ movl -200(%ebp), %eax # 4-byte Reload
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl -196(%ebp), %eax # 4-byte Reload
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl -192(%ebp), %eax # 4-byte Reload
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl -188(%ebp), %eax # 4-byte Reload
+ movl %eax, -168(%ebp) # 4-byte Spill
+ jb .LBB240_4
+# BB#3:
+ movl $0, -172(%ebp) # 4-byte Folded Spill
+ movl $0, -144(%ebp) # 4-byte Folded Spill
+ movl $0, -148(%ebp) # 4-byte Folded Spill
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+ movl $0, -156(%ebp) # 4-byte Folded Spill
+ movl $0, -160(%ebp) # 4-byte Folded Spill
+ movl $0, -164(%ebp) # 4-byte Folded Spill
+ movl $0, -168(%ebp) # 4-byte Folded Spill
+.LBB240_4:
+ movl -184(%ebp), %eax # 4-byte Reload
+ movl %eax, -108(%ebp)
+ movl -236(%ebp), %eax # 4-byte Reload
+ movl %eax, -104(%ebp)
+ movl -176(%ebp), %edi # 4-byte Reload
+ movl %edi, -100(%ebp)
+ movl -232(%ebp), %edi # 4-byte Reload
+ movl %edi, -96(%ebp)
+ movl -212(%ebp), %esi # 4-byte Reload
+ movl %esi, -92(%ebp)
+ movl -228(%ebp), %esi # 4-byte Reload
+ movl %esi, -88(%ebp)
+ movl -248(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -84(%ebp)
+ movl -188(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -140(%ebp)
+ movl -192(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -136(%ebp)
+ movl -196(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -132(%ebp)
+ movl -200(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -128(%ebp)
+ movl -204(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -124(%ebp)
+ movl -208(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -120(%ebp)
+ movl %esi, %ebx
+ movl %edi, %esi
+ movl %eax, %edi
+ movl %edx, -116(%ebp)
+ movl %ecx, -112(%ebp)
+ sbbl %edx, %edx
+ movl -180(%ebp), %eax # 4-byte Reload
+ movl %eax, -80(%ebp)
+ movl -252(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB240_6
+# BB#5:
+ movl $0, %eax
+ movl $0, %ebx
+ movl $0, %esi
+ movl $0, %edi
+.LBB240_6:
+ movl %eax, -180(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+ leal -140(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -108(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -76(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl -220(%ebp), %eax # 4-byte Reload
+ addl %eax, -168(%ebp) # 4-byte Folded Spill
+ adcl %edi, -164(%ebp) # 4-byte Folded Spill
+ movl -216(%ebp), %eax # 4-byte Reload
+ adcl %eax, -160(%ebp) # 4-byte Folded Spill
+ adcl %esi, -156(%ebp) # 4-byte Folded Spill
+ movl -244(%ebp), %eax # 4-byte Reload
+ adcl %eax, -152(%ebp) # 4-byte Folded Spill
+ adcl %ebx, -148(%ebp) # 4-byte Folded Spill
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -240(%ebp), %eax # 4-byte Folded Reload
+ movl %eax, -144(%ebp) # 4-byte Spill
+ movl -172(%ebp), %edi # 4-byte Reload
+ adcl -180(%ebp), %edi # 4-byte Folded Reload
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -176(%ebp) # 4-byte Spill
+ movl -224(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre8Lbmi2@PLT
+ movl -168(%ebp), %eax # 4-byte Reload
+ addl -44(%ebp), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl -164(%ebp), %eax # 4-byte Reload
+ adcl -40(%ebp), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -36(%ebp), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl -32(%ebp), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl -152(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ adcl -16(%ebp), %edi
+ movl %edi, -172(%ebp) # 4-byte Spill
+ adcl %esi, -176(%ebp) # 4-byte Folded Spill
+ movl -76(%ebp), %eax
+ movl 8(%ebp), %esi
+ subl (%esi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ movl -72(%ebp), %ecx
+ sbbl 4(%esi), %ecx
+ movl -68(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -192(%ebp) # 4-byte Spill
+ movl -64(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl -60(%ebp), %ebx
+ sbbl 16(%esi), %ebx
+ movl -56(%ebp), %eax
+ sbbl 20(%esi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ movl -52(%ebp), %eax
+ sbbl 24(%esi), %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ movl -48(%ebp), %eax
+ sbbl 28(%esi), %eax
+ movl %eax, -188(%ebp) # 4-byte Spill
+ movl 32(%esi), %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ sbbl %eax, -168(%ebp) # 4-byte Folded Spill
+ movl 36(%esi), %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ sbbl %eax, -156(%ebp) # 4-byte Folded Spill
+ movl 48(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl 52(%esi), %eax
+ movl %eax, -220(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 56(%esi), %eax
+ movl %eax, -224(%ebp) # 4-byte Spill
+ movl -144(%ebp), %edi # 4-byte Reload
+ sbbl %eax, %edi
+ movl 60(%esi), %eax
+ movl %eax, -228(%ebp) # 4-byte Spill
+ sbbl %eax, -172(%ebp) # 4-byte Folded Spill
+ sbbl $0, -176(%ebp) # 4-byte Folded Spill
+ movl 64(%esi), %eax
+ movl %eax, -260(%ebp) # 4-byte Spill
+ subl %eax, -196(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -264(%ebp) # 4-byte Spill
+ sbbl %eax, %ecx
+ movl 72(%esi), %eax
+ movl %eax, -268(%ebp) # 4-byte Spill
+ sbbl %eax, -192(%ebp) # 4-byte Folded Spill
+ movl 76(%esi), %eax
+ movl %eax, -272(%ebp) # 4-byte Spill
+ sbbl %eax, %edx
+ movl 80(%esi), %eax
+ movl %eax, -276(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 84(%esi), %eax
+ movl %eax, -280(%ebp) # 4-byte Spill
+ sbbl %eax, -180(%ebp) # 4-byte Folded Spill
+ movl 88(%esi), %eax
+ movl %eax, -284(%ebp) # 4-byte Spill
+ sbbl %eax, -184(%ebp) # 4-byte Folded Spill
+ movl 92(%esi), %eax
+ movl %eax, -288(%ebp) # 4-byte Spill
+ sbbl %eax, -188(%ebp) # 4-byte Folded Spill
+ movl 96(%esi), %eax
+ movl %eax, -292(%ebp) # 4-byte Spill
+ sbbl %eax, -168(%ebp) # 4-byte Folded Spill
+ movl 100(%esi), %eax
+ movl %eax, -236(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 104(%esi), %eax
+ movl %eax, -240(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 108(%esi), %eax
+ movl %eax, -244(%ebp) # 4-byte Spill
+ sbbl %eax, -156(%ebp) # 4-byte Folded Spill
+ movl 112(%esi), %eax
+ movl %eax, -248(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl 116(%esi), %eax
+ movl %eax, -252(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 120(%esi), %eax
+ movl %eax, -232(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl %edi, -144(%ebp) # 4-byte Spill
+ movl 124(%esi), %eax
+ movl %eax, -256(%ebp) # 4-byte Spill
+ sbbl %eax, -172(%ebp) # 4-byte Folded Spill
+ movl -176(%ebp), %edi # 4-byte Reload
+ sbbl $0, %edi
+ movl -196(%ebp), %eax # 4-byte Reload
+ addl -200(%ebp), %eax # 4-byte Folded Reload
+ adcl -204(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%esi)
+ movl -192(%ebp), %eax # 4-byte Reload
+ adcl -208(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%esi)
+ adcl -212(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 40(%esi)
+ adcl -216(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 44(%esi)
+ movl -180(%ebp), %eax # 4-byte Reload
+ adcl -220(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 48(%esi)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ adcl -224(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 52(%esi)
+ movl -188(%ebp), %edx # 4-byte Reload
+ adcl -228(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 56(%esi)
+ movl -168(%ebp), %eax # 4-byte Reload
+ adcl -260(%ebp), %eax # 4-byte Folded Reload
+ movl %edx, 60(%esi)
+ movl -164(%ebp), %ecx # 4-byte Reload
+ adcl -264(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 64(%esi)
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -268(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 68(%esi)
+ movl -156(%ebp), %ecx # 4-byte Reload
+ adcl -272(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 72(%esi)
+ movl -152(%ebp), %eax # 4-byte Reload
+ adcl -276(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 76(%esi)
+ movl -148(%ebp), %ecx # 4-byte Reload
+ adcl -280(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 80(%esi)
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -284(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 84(%esi)
+ movl -172(%ebp), %ecx # 4-byte Reload
+ adcl -288(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 88(%esi)
+ adcl -292(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 92(%esi)
+ movl %edi, 96(%esi)
+ movl -236(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 100(%esi)
+ movl -240(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 104(%esi)
+ movl -244(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 108(%esi)
+ movl -248(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 112(%esi)
+ movl -252(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 116(%esi)
+ movl -232(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 120(%esi)
+ movl -256(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 124(%esi)
+ addl $300, %esp # imm = 0x12C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end240:
+ .size mcl_fpDbl_mulPre16Lbmi2, .Lfunc_end240-mcl_fpDbl_mulPre16Lbmi2
+
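+# mcl_fpDbl_sqrPre16Lbmi2 (summary): squaring counterpart of
+# mcl_fpDbl_mulPre16Lbmi2; the same one-level split, with both operand
+# pointers equal in each mcl_fpDbl_mulPre8Lbmi2 call.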
+ .globl mcl_fpDbl_sqrPre16Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre16Lbmi2,@function
+mcl_fpDbl_sqrPre16Lbmi2: # @mcl_fpDbl_sqrPre16Lbmi2
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $300, %esp # imm = 0x12C
+ calll .L241$pb
+.L241$pb:
+ popl %ebx
+.Ltmp52:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp52-.L241$pb), %ebx
+ movl %ebx, -184(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre8Lbmi2@PLT
+ leal 32(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 64(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre8Lbmi2@PLT
+ movl 52(%edi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ movl 48(%edi), %eax
+ movl 44(%edi), %ebx
+ movl 40(%edi), %esi
+ movl (%edi), %ecx
+ movl 4(%edi), %edx
+ addl 32(%edi), %ecx
+ movl %ecx, -192(%ebp) # 4-byte Spill
+ adcl 36(%edi), %edx
+ movl %edx, -196(%ebp) # 4-byte Spill
+ adcl 8(%edi), %esi
+ movl %esi, -188(%ebp) # 4-byte Spill
+ adcl 12(%edi), %ebx
+ adcl 16(%edi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ movl -180(%ebp), %eax # 4-byte Reload
+ adcl 20(%edi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ addl %ecx, %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ adcl %edx, %edx
+ movl %edx, -160(%ebp) # 4-byte Spill
+ adcl %esi, %esi
+ movl %esi, -156(%ebp) # 4-byte Spill
+ movl %ebx, %edx
+ movl %ebx, %esi
+ adcl %edx, %edx
+ movl %edx, -152(%ebp) # 4-byte Spill
+ movl -208(%ebp), %eax # 4-byte Reload
+ movl %eax, %edx
+ movl %eax, %ebx
+ adcl %edx, %edx
+ movl %edx, -148(%ebp) # 4-byte Spill
+ movl -180(%ebp), %edx # 4-byte Reload
+ adcl %edx, %edx
+ movl %edx, -144(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl 56(%edi), %edx
+ movl -168(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ adcl 24(%edi), %edx
+ movl 60(%edi), %ecx
+ adcl 28(%edi), %ecx
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edi
+ sbbl %eax, %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ jb .LBB241_2
+# BB#1:
+ movl $0, -144(%ebp) # 4-byte Folded Spill
+ movl $0, -148(%ebp) # 4-byte Folded Spill
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+ movl $0, -156(%ebp) # 4-byte Folded Spill
+ movl $0, -160(%ebp) # 4-byte Folded Spill
+ movl $0, -164(%ebp) # 4-byte Folded Spill
+.LBB241_2:
+ movl %edx, %eax
+ movl -172(%ebp), %edi # 4-byte Reload
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl %eax, %eax
+ movl %ecx, %edi
+ adcl %edi, %edi
+ movl %edi, -176(%ebp) # 4-byte Spill
+ movl -204(%ebp), %edi # 4-byte Reload
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB241_4
+# BB#3:
+ movl $0, -176(%ebp) # 4-byte Folded Spill
+ xorl %eax, %eax
+.LBB241_4:
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl -192(%ebp), %eax # 4-byte Reload
+ movl %eax, -108(%ebp)
+ movl %eax, -140(%ebp)
+ movl -196(%ebp), %eax # 4-byte Reload
+ movl %eax, -104(%ebp)
+ movl %eax, -136(%ebp)
+ movl -188(%ebp), %eax # 4-byte Reload
+ movl %eax, -100(%ebp)
+ movl %eax, -132(%ebp)
+ movl %esi, -96(%ebp)
+ movl %esi, -128(%ebp)
+ movl %ebx, -92(%ebp)
+ movl %ebx, -124(%ebp)
+ movl -180(%ebp), %eax # 4-byte Reload
+ movl %eax, -88(%ebp)
+ movl %eax, -120(%ebp)
+ movl %edx, -84(%ebp)
+ movl %edx, -116(%ebp)
+ movl %ecx, -80(%ebp)
+ movl %ecx, -112(%ebp)
+ movl -200(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB241_5
+# BB#6:
+ xorl %edi, %edi
+ jmp .LBB241_7
+.LBB241_5:
+ shrl $31, %ecx
+ movl %ecx, %edi
+.LBB241_7:
+ leal -140(%ebp), %eax
+ movl %eax, 8(%esp)
+ leal -108(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -76(%ebp), %eax
+ movl %eax, (%esp)
+ movl -168(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -184(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre8Lbmi2@PLT
+ movl -164(%ebp), %eax # 4-byte Reload
+ addl -44(%ebp), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -40(%ebp), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl -36(%ebp), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl -152(%ebp), %eax # 4-byte Reload
+ adcl -32(%ebp), %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ movl -172(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl -176(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ adcl %edi, %esi
+ movl %esi, -168(%ebp) # 4-byte Spill
+ movl -76(%ebp), %ecx
+ movl 8(%ebp), %esi
+ subl (%esi), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ movl -72(%ebp), %edi
+ sbbl 4(%esi), %edi
+ movl -68(%ebp), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, -184(%ebp) # 4-byte Spill
+ movl -64(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, -192(%ebp) # 4-byte Spill
+ movl -60(%ebp), %ebx
+ sbbl 16(%esi), %ebx
+ movl %eax, %ecx
+ movl -56(%ebp), %eax
+ sbbl 20(%esi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ movl -52(%ebp), %edx
+ sbbl 24(%esi), %edx
+ movl %edx, -188(%ebp) # 4-byte Spill
+ movl -48(%ebp), %edx
+ sbbl 28(%esi), %edx
+ movl 32(%esi), %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 36(%esi), %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ sbbl %eax, -156(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl 48(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 52(%esi), %eax
+ movl %eax, -220(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 56(%esi), %eax
+ movl %eax, -224(%ebp) # 4-byte Spill
+ sbbl %eax, -172(%ebp) # 4-byte Folded Spill
+ movl 60(%esi), %eax
+ movl %eax, -228(%ebp) # 4-byte Spill
+ sbbl %eax, %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ movl -168(%ebp), %eax # 4-byte Reload
+ sbbl $0, %eax
+ movl 64(%esi), %ecx
+ movl %ecx, -260(%ebp) # 4-byte Spill
+ subl %ecx, -180(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %ecx
+ movl %ecx, -264(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 72(%esi), %ecx
+ movl %ecx, -268(%ebp) # 4-byte Spill
+ sbbl %ecx, -184(%ebp) # 4-byte Folded Spill
+ movl 76(%esi), %ecx
+ movl %ecx, -272(%ebp) # 4-byte Spill
+ sbbl %ecx, -192(%ebp) # 4-byte Folded Spill
+ movl 80(%esi), %ecx
+ movl %ecx, -276(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 84(%esi), %ecx
+ movl %ecx, -280(%ebp) # 4-byte Spill
+ sbbl %ecx, -196(%ebp) # 4-byte Folded Spill
+ movl 88(%esi), %ecx
+ movl %ecx, -284(%ebp) # 4-byte Spill
+ sbbl %ecx, -188(%ebp) # 4-byte Folded Spill
+ movl 92(%esi), %ecx
+ movl %ecx, -288(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 96(%esi), %ecx
+ movl %ecx, -292(%ebp) # 4-byte Spill
+ sbbl %ecx, -164(%ebp) # 4-byte Folded Spill
+ movl 100(%esi), %ecx
+ movl %ecx, -232(%ebp) # 4-byte Spill
+ sbbl %ecx, -160(%ebp) # 4-byte Folded Spill
+ movl 104(%esi), %ecx
+ movl %ecx, -236(%ebp) # 4-byte Spill
+ sbbl %ecx, -156(%ebp) # 4-byte Folded Spill
+ movl 108(%esi), %ecx
+ movl %ecx, -240(%ebp) # 4-byte Spill
+ sbbl %ecx, -152(%ebp) # 4-byte Folded Spill
+ movl 112(%esi), %ecx
+ movl %ecx, -244(%ebp) # 4-byte Spill
+ sbbl %ecx, -148(%ebp) # 4-byte Folded Spill
+ movl 116(%esi), %ecx
+ movl %ecx, -248(%ebp) # 4-byte Spill
+ sbbl %ecx, -144(%ebp) # 4-byte Folded Spill
+ movl 120(%esi), %ecx
+ movl %ecx, -252(%ebp) # 4-byte Spill
+ sbbl %ecx, -172(%ebp) # 4-byte Folded Spill
+ movl 124(%esi), %ecx
+ movl %ecx, -256(%ebp) # 4-byte Spill
+ sbbl %ecx, -176(%ebp) # 4-byte Folded Spill
+ sbbl $0, %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl -180(%ebp), %eax # 4-byte Reload
+ addl -200(%ebp), %eax # 4-byte Folded Reload
+ adcl -204(%ebp), %edi # 4-byte Folded Reload
+ movl %eax, 32(%esi)
+ movl -184(%ebp), %eax # 4-byte Reload
+ adcl -208(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 36(%esi)
+ movl -192(%ebp), %ecx # 4-byte Reload
+ adcl -212(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%esi)
+ adcl -216(%ebp), %ebx # 4-byte Folded Reload
+ movl %ecx, 44(%esi)
+ movl -196(%ebp), %ecx # 4-byte Reload
+ adcl -220(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 48(%esi)
+ movl -188(%ebp), %eax # 4-byte Reload
+ adcl -224(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl %edx, %ecx
+ adcl -228(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 56(%esi)
+ movl -164(%ebp), %eax # 4-byte Reload
+ adcl -260(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 60(%esi)
+ movl -160(%ebp), %ecx # 4-byte Reload
+ adcl -264(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 64(%esi)
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl -268(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 68(%esi)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -272(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 72(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -276(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 76(%esi)
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl -280(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 80(%esi)
+ movl -172(%ebp), %eax # 4-byte Reload
+ adcl -284(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 84(%esi)
+ movl -176(%ebp), %ecx # 4-byte Reload
+ adcl -288(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 88(%esi)
+ movl -168(%ebp), %eax # 4-byte Reload
+ adcl -292(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 92(%esi)
+ movl %eax, 96(%esi)
+ movl -232(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 100(%esi)
+ movl -236(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 104(%esi)
+ movl -240(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 108(%esi)
+ movl -244(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 112(%esi)
+ movl -248(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 116(%esi)
+ movl -252(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 120(%esi)
+ movl -256(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 124(%esi)
+ addl $300, %esp # imm = 0x12C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end241:
+ .size mcl_fpDbl_sqrPre16Lbmi2, .Lfunc_end241-mcl_fpDbl_sqrPre16Lbmi2
+
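+# mcl_fp_mont16Lbmi2 (summary): 16-limb Montgomery multiplication. Each
+# round calls .LmulPv512x32 with one limb of the multiplier and again with
+# q = (low accumulator limb) * n', where n' is read from the word just
+# below the modulus pointer (-4 offset), and both partial products are
+# folded into the accumulator so its low limb cancels.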
+ .globl mcl_fp_mont16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont16Lbmi2,@function
+mcl_fp_mont16Lbmi2: # @mcl_fp_mont16Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2428, %esp # imm = 0x97C
+ calll .L242$pb
+.L242$pb:
+ popl %ebx
+.Ltmp53:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp53-.L242$pb), %ebx
+ movl 2460(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 2360(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 2360(%esp), %ebp
+ movl 2364(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 2424(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 2420(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 2416(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 2412(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2408(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2404(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2400(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2396(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2392(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2388(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2384(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 2380(%esp), %edi
+ movl 2376(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2372(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2368(%esp), %esi
+ movl %eax, (%esp)
+ leal 2288(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ addl 2288(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2292(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 2296(%esp), %esi
+ movl %esi, %ebp
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2300(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2304(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2308(%esp), %edi
+ movl %edi, %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2312(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2316(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2320(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2328(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2332(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2340(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2344(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2348(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2352(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 2456(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 2216(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %edi
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 2216(%esp), %ecx
+ adcl 2220(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2224(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2228(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2232(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 2236(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2244(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2252(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2256(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2260(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2268(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2272(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2276(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 2280(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2144(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %edi
+ addl 2144(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2148(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2152(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2156(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2160(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 2164(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 2168(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2176(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2180(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2184(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2188(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2192(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2196(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2200(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2204(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 2208(%esp), %esi
+ adcl $0, %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 2072(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 2072(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2076(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2080(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2084(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2088(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 2092(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2096(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2100(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2104(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 2108(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2112(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 2116(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2120(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2124(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2128(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 2132(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2136(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2000(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 2000(%esp), %ecx
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 2004(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2008(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2012(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 2016(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2020(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2024(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2028(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2032(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 2036(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2040(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 2044(%esp), %edi
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 2048(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 2052(%esp), %ebp
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl 2056(%esp), %esi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 2060(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2064(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1928(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 1928(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1932(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1936(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1944(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1948(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1956(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1960(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1964(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1968(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1972(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 1976(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl 1980(%esp), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1984(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1988(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1992(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1856(%esp), %ecx
+ movl 2460(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 1856(%esp), %esi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1860(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1864(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1868(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1872(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1876(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1880(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1884(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1888(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 1892(%esp), %esi
+ adcl 1896(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 1900(%esp), %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1904(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1908(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1912(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1916(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1920(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1784(%esp), %ecx
+ movl 2452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 1784(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1804(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1816(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1820(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1824(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1836(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1840(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1844(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1848(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1712(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1712(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1716(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1720(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1724(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1728(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1732(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1736(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1740(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1756(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1764(%esp), %ebp
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1768(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1772(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1776(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1640(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1640(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1644(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1656(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1664(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1668(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1680(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 1688(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ adcl 1692(%esp), %esi
+ movl %esi, %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1696(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1700(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1704(%esp), %esi
+ sbbl %eax, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1568(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 80(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1568(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1572(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1576(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1580(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1584(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1588(%esp), %ebp
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1592(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1596(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1600(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1604(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1608(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1612(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1616(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 1620(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1624(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1628(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 1632(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1496(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1496(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1500(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1504(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1512(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1516(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1544(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1548(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1424(%esp), %ecx
+ movl 2460(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ andl $1, %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 1424(%esp), %eax
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1432(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1440(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1444(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1448(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 1472(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1480(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1484(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1488(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 2456(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1352(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 1352(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1396(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 1404(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1412(%esp), %esi
+ adcl 1416(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1280(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 1280(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1284(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1288(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1296(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1300(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1304(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1308(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1312(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1316(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1320(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1328(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1336(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 1340(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1344(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 2456(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 2452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 1208(%esp), %ecx
+ adcl 1212(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1244(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1260(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1264(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1272(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1136(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 1136(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %edi # 4-byte Reload
+ adcl 1164(%esp), %edi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1188(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1192(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 1064(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 1064(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 1088(%esp), %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl 1092(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1116(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 992(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 996(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1000(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1004(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1008(%esp), %edi
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 1012(%esp), %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1016(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ adcl 1020(%esp), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1024(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1028(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 1032(%esp), %esi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1036(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1040(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1044(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1048(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1052(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1056(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 920(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 920(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 932(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ adcl 936(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 956(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 968(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 848(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 848(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 856(%esp), %edi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %esi # 4-byte Reload
+ adcl 868(%esp), %esi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 896(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 776(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 776(%esp), %ecx
+ adcl 780(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 784(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 792(%esp), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 800(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 704(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 712(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 728(%esp), %esi
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 732(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 752(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 92(%esp), %ecx # 4-byte Reload
+ addl 632(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 652(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl 656(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 664(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 676(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 680(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 92(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 560(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl 576(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 592(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 608(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 612(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 488(%esp), %ecx
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 500(%esp), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 508(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 516(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 520(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 536(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 416(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 432(%esp), %edi
+ adcl 436(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 440(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 448(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 116(%esp), %ecx # 4-byte Reload
+ addl 344(%esp), %ecx
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 348(%esp), %ebp
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 356(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 360(%esp), %edi
+ adcl 364(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 116(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 272(%esp), %esi
+ adcl 276(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 288(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 296(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 308(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 120(%esp), %ecx # 4-byte Reload
+ addl 200(%esp), %ecx
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 212(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 220(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 232(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 244(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 2460(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ andl $1, %ebp
+ addl 128(%esp), %esi
+ movl 104(%esp), %ebx # 4-byte Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 140(%esp), %ebx
+ movl %ebx, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 148(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 152(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 156(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 160(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 164(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 168(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 172(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 176(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 180(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 184(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 188(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %edx # 4-byte Reload
+ adcl 192(%esp), %edx
+ movl %edx, 116(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %eax, %edx
+ movl 2460(%esp), %edi
+ subl (%edi), %edx
+ movl %ecx, %eax
+ sbbl 4(%edi), %eax
+ movl %ebx, %ecx
+ sbbl 8(%edi), %ecx
+ movl 112(%esp), %ebx # 4-byte Reload
+ sbbl 12(%edi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx # 4-byte Reload
+ sbbl 16(%edi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 20(%edi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ sbbl 24(%edi), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ sbbl 28(%edi), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ sbbl 32(%edi), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ sbbl 36(%edi), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ sbbl 40(%edi), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ sbbl 44(%edi), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ sbbl 48(%edi), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ sbbl 52(%edi), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ sbbl 56(%edi), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ sbbl 60(%edi), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %edi # 4-byte Reload
+ sbbl $0, %ebp
+ andl $1, %ebp
+ movl %ebp, %ebx
+ jne .LBB242_2
+# BB#1:
+ movl %edx, %edi
+.LBB242_2:
+ movl 2448(%esp), %edx
+ movl %edi, (%edx)
+ testb %bl, %bl
+ movl 108(%esp), %edi # 4-byte Reload
+ jne .LBB242_4
+# BB#3:
+ movl %eax, %edi
+.LBB242_4:
+ movl %edi, 4(%edx)
+ jne .LBB242_6
+# BB#5:
+ movl %ecx, 104(%esp) # 4-byte Spill
+.LBB242_6:
+ movl 104(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%edx)
+ jne .LBB242_8
+# BB#7:
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+.LBB242_8:
+ movl 112(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edx)
+ movl 100(%esp), %eax # 4-byte Reload
+ jne .LBB242_10
+# BB#9:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB242_10:
+ movl %eax, 16(%edx)
+ movl 88(%esp), %eax # 4-byte Reload
+ jne .LBB242_12
+# BB#11:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB242_12:
+ movl %eax, 20(%edx)
+ jne .LBB242_14
+# BB#13:
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+.LBB242_14:
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ jne .LBB242_16
+# BB#15:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB242_16:
+ movl %eax, 28(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ jne .LBB242_18
+# BB#17:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB242_18:
+ movl %eax, 32(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB242_20
+# BB#19:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB242_20:
+ movl %eax, 36(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ jne .LBB242_22
+# BB#21:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB242_22:
+ movl %eax, 40(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ jne .LBB242_24
+# BB#23:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB242_24:
+ movl %eax, 44(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ jne .LBB242_26
+# BB#25:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB242_26:
+ movl %eax, 48(%edx)
+ movl 92(%esp), %eax # 4-byte Reload
+ jne .LBB242_28
+# BB#27:
+ movl 52(%esp), %eax # 4-byte Reload
+.LBB242_28:
+ movl %eax, 52(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ jne .LBB242_30
+# BB#29:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB242_30:
+ movl %eax, 56(%edx)
+ movl 116(%esp), %eax # 4-byte Reload
+ jne .LBB242_32
+# BB#31:
+ movl 120(%esp), %eax # 4-byte Reload
+.LBB242_32:
+ movl %eax, 60(%edx)
+ addl $2428, %esp # imm = 0x97C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end242:
+ .size mcl_fp_mont16Lbmi2, .Lfunc_end242-mcl_fp_mont16Lbmi2
+
+ .globl mcl_fp_montNF16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF16Lbmi2,@function
+mcl_fp_montNF16Lbmi2: # @mcl_fp_montNF16Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2412, %esp # imm = 0x96C
+ calll .L243$pb
+.L243$pb:
+ popl %ebx
+.Ltmp54:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp54-.L243$pb), %ebx
+ movl 2444(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 2344(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 2344(%esp), %edi
+ movl 2348(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 2408(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2404(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2400(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2396(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2392(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2388(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2384(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 2380(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 2376(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 2372(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 2368(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2364(%esp), %ebp
+ movl 2360(%esp), %esi
+ movl 2356(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2352(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 2272(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 2272(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2276(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2280(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2284(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 2288(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 2292(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2296(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2300(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 2304(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 2308(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2312(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2316(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2320(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2324(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 2328(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2332(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2336(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 2200(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 2264(%esp), %edx
+ movl 108(%esp), %ecx # 4-byte Reload
+ addl 2200(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2204(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2208(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 2216(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2224(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 2228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 2232(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 2236(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2244(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2248(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 2252(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 2260(%esp), %esi
+ adcl $0, %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2128(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 2128(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2132(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2136(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2140(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2144(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2148(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2152(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 2156(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2160(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 2164(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2172(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2176(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2180(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2184(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 2188(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 2192(%esp), %esi
+ movl 2440(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 2056(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 2120(%esp), %eax
+ movl 84(%esp), %edx # 4-byte Reload
+ addl 2056(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2060(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2064(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2068(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2072(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 2076(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 2080(%esp), %edi
+ movl %edi, %ebp
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 2084(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 2088(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2092(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2096(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 2100(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2104(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2108(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 2112(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 2116(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1984(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1984(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1988(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1992(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1996(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2000(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 2004(%esp), %edi
+ adcl 2008(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2012(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2016(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2020(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2024(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2028(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2032(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 2036(%esp), %ebp
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 2040(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2044(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2048(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1912(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1976(%esp), %eax
+ movl 76(%esp), %edx # 4-byte Reload
+ addl 1912(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1916(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1920(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1924(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1928(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1932(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1936(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1940(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1944(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 1948(%esp), %edi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1952(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1956(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1960(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ adcl 1964(%esp), %esi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1968(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1972(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1840(%esp), %ecx
+ movl 2444(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ addl 1840(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1844(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1848(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1852(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1856(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1864(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1868(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1872(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1876(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1880(%esp), %edi
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1884(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1888(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1892(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1896(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1900(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1904(%esp), %esi
+ movl 2440(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1768(%esp), %ecx
+ movl 2436(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ movl 1832(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1768(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1772(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1776(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1784(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1804(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ adcl 1808(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1820(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1828(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1696(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1696(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1700(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1704(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1708(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1712(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1716(%esp), %ebp
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1720(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1724(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1728(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1732(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1736(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1740(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1744(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1756(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1624(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1688(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 1624(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1628(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1632(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1636(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1640(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 1644(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 1648(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1656(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1664(%esp), %esi
+ movl %esi, %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1680(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1552(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1552(%esp), %esi
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1556(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1568(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1576(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1580(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1584(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1588(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1592(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1596(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1600(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1604(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1608(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1616(%esp), %edi
+ movl 2440(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1480(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1544(%esp), %eax
+ addl 1480(%esp), %esi
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 1484(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 1488(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1492(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1496(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 1500(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1504(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 1508(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1512(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1516(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1520(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ adcl 1524(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1528(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1532(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1536(%esp), %ebp
+ adcl 1540(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl $0, %edi
+ movl %esi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1408(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1408(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 1416(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1432(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1440(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1444(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1464(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1468(%esp), %ebp
+ adcl 1472(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1336(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1400(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1336(%esp), %ecx
+ adcl 1340(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1344(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1348(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 1352(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1356(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 1360(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1364(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1368(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1372(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 1376(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 1380(%esp), %edi
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1384(%esp), %esi
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1388(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 1392(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 1396(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1264(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1308(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ adcl 1312(%esp), %esi
+ movl %esi, %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1320(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1324(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 1192(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1256(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 1192(%esp), %ecx
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1196(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1200(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1204(%esp), %esi
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1208(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1212(%esp), %edi
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1216(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1220(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1224(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 1228(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1232(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ adcl 1236(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1240(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 1244(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 1248(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 1252(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1120(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1120(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1132(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1140(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1144(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1176(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 1048(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1112(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 1048(%esp), %ecx
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 1052(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1068(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1076(%esp), %ebp
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1100(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 976(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 976(%esp), %edi
+ adcl 980(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 1000(%esp), %edi
+ adcl 1004(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1008(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1016(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 904(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 968(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 904(%esp), %eax
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 908(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 912(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 916(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 920(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 924(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 928(%esp), %edi
+ adcl 932(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 936(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ adcl 940(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 944(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 948(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 952(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 956(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 960(%esp), %ebp
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 964(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl %eax, %esi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 832(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 832(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 856(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 872(%esp), %esi
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 876(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 888(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 892(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 824(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 760(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 796(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 800(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 808(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 816(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 688(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 716(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 732(%esp), %ebp
+ adcl 736(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 616(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 680(%esp), %edx
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 616(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 624(%esp), %edi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 640(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 656(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 672(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 544(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 552(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 560(%esp), %edi
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 564(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 600(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 536(%esp), %edx
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 472(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 484(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ adcl 488(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 496(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 400(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 400(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 412(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 420(%esp), %edi
+ adcl 424(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 444(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 392(%esp), %edx
+ movl 92(%esp), %ecx # 4-byte Reload
+ addl 328(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 336(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 344(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 352(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 368(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 256(%esp), %ebp
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 260(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 268(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 280(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 284(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 248(%esp), %edx
+ movl %edi, %ecx
+ addl 184(%esp), %ecx
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 188(%esp), %edi
+ adcl 192(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 196(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 208(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 112(%esp), %esi
+ movl %edi, %eax
+ adcl 116(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 120(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ adcl 124(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 128(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 132(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 140(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 144(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 148(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 152(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 156(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 160(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 164(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 168(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 172(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 176(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
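+# Final step: compute t - p limb by limb; the sign of the last borrow (top limb)
+# decides below whether t or t - p is stored to the result at 2432(%esp).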
+ movl %eax, %edx
+ movl 2444(%esp), %esi
+ subl (%esi), %edx
+ sbbl 4(%esi), %edi
+ movl %ebp, %ecx
+ sbbl 8(%esi), %ecx
+ movl %ebx, %eax
+ sbbl 12(%esi), %eax
+ movl 80(%esp), %ebx # 4-byte Reload
+ sbbl 16(%esi), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ sbbl 20(%esi), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ sbbl 24(%esi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %ebx # 4-byte Reload
+ sbbl 28(%esi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 48(%esp), %ebx # 4-byte Reload
+ sbbl 32(%esi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ sbbl 36(%esi), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ sbbl 40(%esi), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %ebx # 4-byte Reload
+ sbbl 44(%esi), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 48(%esi), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 96(%esp), %ebx # 4-byte Reload
+ sbbl 52(%esi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 92(%esp), %ebx # 4-byte Reload
+ sbbl 56(%esi), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 104(%esp), %ebx # 4-byte Reload
+ sbbl 60(%esi), %ebx
+ movl %ebx, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ testl %ebx, %ebx
+ js .LBB243_2
+# BB#1:
+ movl %edx, %esi
+.LBB243_2:
+ movl 2432(%esp), %edx
+ movl %esi, (%edx)
+ movl 108(%esp), %esi # 4-byte Reload
+ js .LBB243_4
+# BB#3:
+ movl %edi, %esi
+.LBB243_4:
+ movl %esi, 4(%edx)
+ js .LBB243_6
+# BB#5:
+ movl %ecx, %ebp
+.LBB243_6:
+ movl %ebp, 8(%edx)
+ movl 76(%esp), %ecx # 4-byte Reload
+ js .LBB243_8
+# BB#7:
+ movl %eax, %ecx
+.LBB243_8:
+ movl %ecx, 12(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB243_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB243_10:
+ movl %eax, 16(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ js .LBB243_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB243_12:
+ movl %eax, 20(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB243_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB243_14:
+ movl %eax, 24(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB243_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB243_16:
+ movl %eax, 28(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB243_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB243_18:
+ movl %eax, 32(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB243_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB243_20:
+ movl %eax, 36(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB243_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB243_22:
+ movl %eax, 40(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB243_24
+# BB#23:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB243_24:
+ movl %eax, 44(%edx)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB243_26
+# BB#25:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB243_26:
+ movl %eax, 48(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB243_28
+# BB#27:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB243_28:
+ movl %eax, 52(%edx)
+ movl 92(%esp), %eax # 4-byte Reload
+ js .LBB243_30
+# BB#29:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB243_30:
+ movl %eax, 56(%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ js .LBB243_32
+# BB#31:
+ movl 84(%esp), %eax # 4-byte Reload
+.LBB243_32:
+ movl %eax, 60(%edx)
+ addl $2412, %esp # imm = 0x96C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end243:
+ .size mcl_fp_montNF16Lbmi2, .Lfunc_end243-mcl_fp_montNF16Lbmi2
+
+ .globl mcl_fp_montRed16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed16Lbmi2,@function
+mcl_fp_montRed16Lbmi2: # @mcl_fp_montRed16Lbmi2
+# BB#0:
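+# Montgomery reduction of a 32-limb input (arg at 1380(%esp) after the prologue)
+# modulo the 16-limb modulus p (arg at 1384(%esp)); each round adds q*p via
+# .LmulPv512x32 so the current low limb cancels, then carries are propagated.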
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1356, %esp # imm = 0x54C
+ calll .L244$pb
+.L244$pb:
+ popl %eax
+.Ltmp55:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp55-.L244$pb), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1384(%esp), %edx
+ movl -4(%edx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
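+# -4(p) appears to hold inv = -p^(-1) mod 2^32; it is cached at 100(%esp) and
+# used to form each round's quotient digit.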
+ movl 1380(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 112(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ imull %eax, %ebx
+ movl 124(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 112(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 108(%ecx), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl %esi, 152(%esp) # 4-byte Spill
+ movl 100(%ecx), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 92(%ecx), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 84(%ecx), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 80(%ecx), %edi
+ movl %edi, 148(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 192(%esp) # 4-byte Spill
+ movl 68(%ecx), %edi
+ movl %edi, 204(%esp) # 4-byte Spill
+ movl 64(%ecx), %esi
+ movl %esi, 200(%esp) # 4-byte Spill
+ movl 60(%ecx), %edi
+ movl %edi, 180(%esp) # 4-byte Spill
+ movl 56(%ecx), %edi
+ movl %edi, 164(%esp) # 4-byte Spill
+ movl 52(%ecx), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 40(%ecx), %ebp
+ movl 36(%ecx), %edi
+ movl 32(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 12(%ecx), %esi
+ movl 8(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 60(%edx), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 56(%edx), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 52(%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 1288(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 1288(%esp), %eax
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1300(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1312(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1320(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1324(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ adcl 1328(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ adcl $0, 204(%esp) # 4-byte Folded Spill
+ adcl $0, 192(%esp) # 4-byte Folded Spill
+ movl 196(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ movl %eax, 112(%esp) # 4-byte Spill
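+# Each reduction round: q = t[i]*inv (100(%esp)), then t += q*p via .LmulPv512x32;
+# 112(%esp) keeps the running carry out of the top limb.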
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1216(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ movl 112(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 1216(%esp), %esi
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1220(%esp), %edx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1244(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %esi # 4-byte Reload
+ adcl 1260(%esp), %esi
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1264(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ adcl $0, 192(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 196(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ movl 156(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 1144(%esp), %ebp
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1148(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl 1184(%esp), %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1208(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ adcl $0, 196(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ movl 168(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 156(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1072(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 1072(%esp), %esi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1076(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 168(%esp) # 4-byte Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1000(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 1000(%esp), %edi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1004(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ movl 188(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ movl 172(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 928(%esp), %esi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 932(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 188(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 172(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ movl 100(%esp), %ebp # 4-byte Reload
+ imull %ebp, %eax
+ movl %eax, (%esp)
+ leal 856(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 856(%esp), %edi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 860(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ movl 176(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 144(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull %ebp, %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 784(%esp), %esi
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 788(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %ebp # 4-byte Reload
+ adcl 828(%esp), %ebp
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 176(%esp) # 4-byte Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ movl 156(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 712(%esp), %edi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 716(%esp), %ecx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ adcl 752(%esp), %ebp
+ movl %ebp, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %edi # 4-byte Reload
+ adcl 756(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 640(%esp), %esi
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 644(%esp), %ecx
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %esi # 4-byte Reload
+ adcl 668(%esp), %esi
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ adcl 680(%esp), %edi
+ movl %edi, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 1384(%esp), %eax
+ movl %eax, %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 568(%esp), %ebp
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 572(%esp), %ecx
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %ebp # 4-byte Reload
+ adcl 588(%esp), %ebp
+ adcl 592(%esp), %esi
+ movl %esi, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %esi # 4-byte Reload
+ adcl 596(%esp), %esi
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl 632(%esp), %edi
+ movl %edi, 152(%esp) # 4-byte Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 496(%esp), %edi
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %edi # 4-byte Reload
+ adcl 508(%esp), %edi
+ adcl 512(%esp), %ebp
+ movl %ebp, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ adcl 520(%esp), %esi
+ movl %esi, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp # 4-byte Reload
+ adcl 532(%esp), %ebp
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 424(%esp), %esi
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ adcl 432(%esp), %edi
+ movl %edi, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %ecx # 4-byte Reload
+ adcl 436(%esp), %ecx
+ movl %ecx, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %ecx # 4-byte Reload
+ adcl 440(%esp), %ecx
+ movl %ecx, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %ecx # 4-byte Reload
+ adcl 448(%esp), %ecx
+ movl %ecx, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %ecx # 4-byte Reload
+ adcl 452(%esp), %ecx
+ movl %ecx, 196(%esp) # 4-byte Spill
+ adcl 456(%esp), %ebp
+ movl 184(%esp), %ecx # 4-byte Reload
+ adcl 460(%esp), %ecx
+ movl %ecx, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %ecx # 4-byte Reload
+ adcl 464(%esp), %ecx
+ movl %ecx, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 484(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %eax, %esi
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 352(%esp), %esi
+ movl 164(%esp), %esi # 4-byte Reload
+ adcl 356(%esp), %esi
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl 380(%esp), %ebp
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl 416(%esp), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 280(%esp), %esi
+ movl 180(%esp), %ecx # 4-byte Reload
+ adcl 284(%esp), %ecx
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl 304(%esp), %ebp
+ movl %ebp, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %esi # 4-byte Reload
+ adcl 316(%esp), %esi
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 344(%esp), %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ movl 124(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %ebp
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 208(%esp), %ebp
+ movl 200(%esp), %edx # 4-byte Reload
+ adcl 212(%esp), %edx
+ movl %edx, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %ecx # 4-byte Reload
+ adcl 220(%esp), %ecx
+ movl %ecx, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp # 4-byte Reload
+ adcl 228(%esp), %ebp
+ movl %ebp, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ adcl 240(%esp), %esi
+ movl %esi, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl 272(%esp), %edi
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %edx, %eax
+ subl 24(%esp), %edx # 4-byte Folded Reload
+ movl 204(%esp), %esi # 4-byte Reload
+ sbbl 12(%esp), %esi # 4-byte Folded Reload
+ sbbl 16(%esp), %ecx # 4-byte Folded Reload
+ movl 196(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ sbbl 28(%esp), %ebp # 4-byte Folded Reload
+ sbbl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 100(%esp) # 4-byte Spill
+ movl 188(%esp), %ebx # 4-byte Reload
+ sbbl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 104(%esp) # 4-byte Spill
+ movl 168(%esp), %ebx # 4-byte Reload
+ sbbl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 108(%esp) # 4-byte Spill
+ movl 176(%esp), %ebx # 4-byte Reload
+ sbbl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 112(%esp) # 4-byte Spill
+ movl 172(%esp), %ebx # 4-byte Reload
+ sbbl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 116(%esp) # 4-byte Spill
+ movl 152(%esp), %ebx # 4-byte Reload
+ sbbl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %ebx # 4-byte Reload
+ sbbl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 136(%esp) # 4-byte Spill
+ movl 144(%esp), %ebx # 4-byte Reload
+ sbbl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 140(%esp) # 4-byte Spill
+ movl 132(%esp), %ebx # 4-byte Reload
+ sbbl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 160(%esp) # 4-byte Spill
+ movl 128(%esp), %ebx # 4-byte Reload
+ sbbl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 164(%esp) # 4-byte Spill
+ movl 124(%esp), %ebx # 4-byte Reload
+ sbbl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 180(%esp) # 4-byte Spill
+ sbbl $0, %edi
+ andl $1, %edi
+ movl %edi, %ebx
+ jne .LBB244_2
+# BB#1:
+ movl %edx, 200(%esp) # 4-byte Spill
+.LBB244_2:
+ movl 1376(%esp), %edx
+ movl 200(%esp), %edi # 4-byte Reload
+ movl %edi, (%edx)
+ testb %bl, %bl
+ jne .LBB244_4
+# BB#3:
+ movl %esi, 204(%esp) # 4-byte Spill
+.LBB244_4:
+ movl 204(%esp), %esi # 4-byte Reload
+ movl %esi, 4(%edx)
+ movl 192(%esp), %esi # 4-byte Reload
+ jne .LBB244_6
+# BB#5:
+ movl %ecx, %esi
+.LBB244_6:
+ movl %esi, 8(%edx)
+ movl 196(%esp), %ecx # 4-byte Reload
+ jne .LBB244_8
+# BB#7:
+ movl %eax, %ecx
+.LBB244_8:
+ movl %ecx, 12(%edx)
+ movl 128(%esp), %esi # 4-byte Reload
+ movl 148(%esp), %eax # 4-byte Reload
+ jne .LBB244_10
+# BB#9:
+ movl %ebp, %eax
+.LBB244_10:
+ movl %eax, 16(%edx)
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl 176(%esp), %eax # 4-byte Reload
+ movl 184(%esp), %ebp # 4-byte Reload
+ jne .LBB244_12
+# BB#11:
+ movl 100(%esp), %ebp # 4-byte Reload
+.LBB244_12:
+ movl %ebp, 20(%edx)
+ movl 152(%esp), %ebp # 4-byte Reload
+ movl 188(%esp), %ebx # 4-byte Reload
+ jne .LBB244_14
+# BB#13:
+ movl 104(%esp), %ebx # 4-byte Reload
+.LBB244_14:
+ movl %ebx, 24(%edx)
+ movl 156(%esp), %ebx # 4-byte Reload
+ movl 168(%esp), %edi # 4-byte Reload
+ jne .LBB244_16
+# BB#15:
+ movl 108(%esp), %edi # 4-byte Reload
+.LBB244_16:
+ movl %edi, 28(%edx)
+ movl 144(%esp), %edi # 4-byte Reload
+ jne .LBB244_18
+# BB#17:
+ movl 112(%esp), %eax # 4-byte Reload
+.LBB244_18:
+ movl %eax, 32(%edx)
+ jne .LBB244_20
+# BB#19:
+ movl 116(%esp), %eax # 4-byte Reload
+ movl %eax, 172(%esp) # 4-byte Spill
+.LBB244_20:
+ movl 172(%esp), %eax # 4-byte Reload
+ movl %eax, 36(%edx)
+ jne .LBB244_22
+# BB#21:
+ movl 120(%esp), %ebp # 4-byte Reload
+.LBB244_22:
+ movl %ebp, 40(%edx)
+ movl 132(%esp), %eax # 4-byte Reload
+ jne .LBB244_24
+# BB#23:
+ movl 136(%esp), %ebx # 4-byte Reload
+.LBB244_24:
+ movl %ebx, 44(%edx)
+ jne .LBB244_26
+# BB#25:
+ movl 140(%esp), %edi # 4-byte Reload
+.LBB244_26:
+ movl %edi, 48(%edx)
+ jne .LBB244_28
+# BB#27:
+ movl 160(%esp), %eax # 4-byte Reload
+.LBB244_28:
+ movl %eax, 52(%edx)
+ jne .LBB244_30
+# BB#29:
+ movl 164(%esp), %esi # 4-byte Reload
+.LBB244_30:
+ movl %esi, 56(%edx)
+ jne .LBB244_32
+# BB#31:
+ movl 180(%esp), %ecx # 4-byte Reload
+.LBB244_32:
+ movl %ecx, 60(%edx)
+ addl $1356, %esp # imm = 0x54C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end244:
+ .size mcl_fp_montRed16Lbmi2, .Lfunc_end244-mcl_fp_montRed16Lbmi2
+
+ .globl mcl_fp_addPre16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre16Lbmi2,@function
+mcl_fp_addPre16Lbmi2: # @mcl_fp_addPre16Lbmi2
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %ebx
+ adcl 8(%ecx), %ebx
+ movl 16(%esp), %edi
+ movl %edx, (%edi)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%edi)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %ebx, 8(%edi)
+ movl 20(%eax), %ebx
+ movl %edx, 12(%edi)
+ movl 20(%ecx), %edx
+ adcl %ebx, %edx
+ movl 24(%eax), %ebx
+ movl %esi, 16(%edi)
+ movl 24(%ecx), %esi
+ adcl %ebx, %esi
+ movl 28(%eax), %ebx
+ movl %edx, 20(%edi)
+ movl 28(%ecx), %edx
+ adcl %ebx, %edx
+ movl 32(%eax), %ebx
+ movl %esi, 24(%edi)
+ movl 32(%ecx), %esi
+ adcl %ebx, %esi
+ movl 36(%eax), %ebx
+ movl %edx, 28(%edi)
+ movl 36(%ecx), %edx
+ adcl %ebx, %edx
+ movl 40(%eax), %ebx
+ movl %esi, 32(%edi)
+ movl 40(%ecx), %esi
+ adcl %ebx, %esi
+ movl 44(%eax), %ebx
+ movl %edx, 36(%edi)
+ movl 44(%ecx), %edx
+ adcl %ebx, %edx
+ movl 48(%eax), %ebx
+ movl %esi, 40(%edi)
+ movl 48(%ecx), %esi
+ adcl %ebx, %esi
+ movl 52(%eax), %ebx
+ movl %edx, 44(%edi)
+ movl 52(%ecx), %edx
+ adcl %ebx, %edx
+ movl 56(%eax), %ebx
+ movl %esi, 48(%edi)
+ movl 56(%ecx), %esi
+ adcl %ebx, %esi
+ movl %edx, 52(%edi)
+ movl %esi, 56(%edi)
+ movl 60(%eax), %eax
+ movl 60(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 60(%edi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end245:
+ .size mcl_fp_addPre16Lbmi2, .Lfunc_end245-mcl_fp_addPre16Lbmi2
+
+ .globl mcl_fp_subPre16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre16Lbmi2,@function
+mcl_fp_subPre16Lbmi2: # @mcl_fp_subPre16Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebp
+ sbbl 8(%edx), %ebp
+ movl 20(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebx)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebp, 8(%ebx)
+ movl 20(%edx), %ebp
+ movl %esi, 12(%ebx)
+ movl 20(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 24(%edx), %ebp
+ movl %edi, 16(%ebx)
+ movl 24(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 28(%edx), %ebp
+ movl %esi, 20(%ebx)
+ movl 28(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 32(%edx), %ebp
+ movl %edi, 24(%ebx)
+ movl 32(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 36(%edx), %ebp
+ movl %esi, 28(%ebx)
+ movl 36(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 40(%edx), %ebp
+ movl %edi, 32(%ebx)
+ movl 40(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 44(%edx), %ebp
+ movl %esi, 36(%ebx)
+ movl 44(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 48(%edx), %ebp
+ movl %edi, 40(%ebx)
+ movl 48(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 52(%edx), %ebp
+ movl %esi, 44(%ebx)
+ movl 52(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 56(%edx), %ebp
+ movl %edi, 48(%ebx)
+ movl 56(%ecx), %edi
+ sbbl %ebp, %edi
+ movl %esi, 52(%ebx)
+ movl %edi, 56(%ebx)
+ movl 60(%edx), %edx
+ movl 60(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 60(%ebx)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end246:
+ .size mcl_fp_subPre16Lbmi2, .Lfunc_end246-mcl_fp_subPre16Lbmi2
+
+ .globl mcl_fp_shr1_16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_16Lbmi2,@function
+mcl_fp_shr1_16Lbmi2: # @mcl_fp_shr1_16Lbmi2
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 48(%ecx)
+ movl 56(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 52(%ecx)
+ movl 60(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 56(%ecx)
+ shrl %eax
+ movl %eax, 60(%ecx)
+ popl %esi
+ retl
+.Lfunc_end247:
+ .size mcl_fp_shr1_16Lbmi2, .Lfunc_end247-mcl_fp_shr1_16Lbmi2
+
+ .globl mcl_fp_add16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add16Lbmi2,@function
+mcl_fp_add16Lbmi2: # @mcl_fp_add16Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 84(%esp), %edx
+ movl (%edx), %esi
+ movl 4(%edx), %ebp
+ movl 80(%esp), %ecx
+ addl (%ecx), %esi
+ movl %esi, %ebx
+ adcl 4(%ecx), %ebp
+ movl 8(%edx), %eax
+ adcl 8(%ecx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 12(%ecx), %esi
+ movl 16(%ecx), %edi
+ adcl 12(%edx), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 16(%edx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ adcl 20(%edx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ adcl 24(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ adcl 28(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ adcl 32(%edx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ adcl 36(%edx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ adcl 40(%edx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ adcl 44(%edx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ adcl 48(%edx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%ecx), %eax
+ adcl 52(%edx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 56(%ecx), %esi
+ adcl 56(%edx), %esi
+ movl 60(%ecx), %ecx
+ adcl 60(%edx), %ecx
+ movl 76(%esp), %edx
+ movl %ebx, (%edx)
+ movl %ebx, %eax
+ movl %ebp, 4(%edx)
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%edx)
+ movl 48(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%edx)
+ movl %edi, 16(%edx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%edx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%edx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%edx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%edx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%edx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%edx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%edx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 48(%edx)
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, 52(%edx)
+ movl %esi, 56(%edx)
+ movl %ecx, 60(%edx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 88(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 8(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ sbbl 12(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ sbbl 16(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 20(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ sbbl 24(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ sbbl 44(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 48(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ sbbl 52(%edi), %eax
+ movl %eax, %ebp
+ sbbl 56(%edi), %esi
+ sbbl 60(%edi), %ecx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB248_2
+# BB#1: # %nocarry
+ movl 4(%esp), %edi # 4-byte Reload
+ movl %edi, (%edx)
+ movl (%esp), %edi # 4-byte Reload
+ movl %edi, 4(%edx)
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%edx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%edx)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%edx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%edx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%edx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%edx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%edx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%edx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%edx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%edx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 48(%edx)
+ movl %ebp, 52(%edx)
+ movl %esi, 56(%edx)
+ movl %ecx, 60(%edx)
+.LBB248_2: # %carry
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end248:
+ .size mcl_fp_add16Lbmi2, .Lfunc_end248-mcl_fp_add16Lbmi2
+
+ .globl mcl_fp_addNF16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF16Lbmi2,@function
+mcl_fp_addNF16Lbmi2: # @mcl_fp_addNF16Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $124, %esp
+ movl 152(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %ecx
+ movl 148(%esp), %esi
+ addl (%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 4(%esi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%edx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 56(%edx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 48(%edx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 44(%edx), %edi
+ movl 40(%edx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 36(%edx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl 20(%edx), %ebp
+ movl 16(%edx), %ebx
+ movl 12(%edx), %ecx
+ movl 8(%edx), %edx
+ adcl 8(%esi), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 12(%esi), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 16(%esi), %ebx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl 24(%esi), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 28(%esi), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 32(%esi), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 36(%esi), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esi), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 44(%esi), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esi), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 52(%esi), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 56(%esi), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 60(%esi), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 156(%esp), %edi
+ movl 80(%esp), %esi # 4-byte Reload
+ subl (%edi), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ sbbl 4(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 8(%edi), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ sbbl 12(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ sbbl 20(%edi), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ sbbl 24(%edi), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 44(%edi), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 52(%edi), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ movl %ecx, %ebx
+ sbbl 56(%edi), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 112(%esp), %ebx # 4-byte Reload
+ sbbl 60(%edi), %ebx
+ movl 80(%esp), %edi # 4-byte Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ testl %ebx, %ebx
+ js .LBB249_2
+# BB#1:
+ movl %esi, %edi
+.LBB249_2:
+ movl 144(%esp), %ebx
+ movl %edi, (%ebx)
+ movl 84(%esp), %edx # 4-byte Reload
+ js .LBB249_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+.LBB249_4:
+ movl %edx, 4(%ebx)
+ movl 68(%esp), %edx # 4-byte Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB249_6
+# BB#5:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB249_6:
+ movl %eax, 8(%ebx)
+ movl 100(%esp), %eax # 4-byte Reload
+ movl 88(%esp), %ecx # 4-byte Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ js .LBB249_8
+# BB#7:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB249_8:
+ movl %esi, 12(%ebx)
+ movl 108(%esp), %esi # 4-byte Reload
+ js .LBB249_10
+# BB#9:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB249_10:
+ movl %edx, 16(%ebx)
+ movl 112(%esp), %edi # 4-byte Reload
+ movl 104(%esp), %ebp # 4-byte Reload
+ js .LBB249_12
+# BB#11:
+ movl 16(%esp), %edx # 4-byte Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+.LBB249_12:
+ movl 72(%esp), %edx # 4-byte Reload
+ movl %edx, 20(%ebx)
+ js .LBB249_14
+# BB#13:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB249_14:
+ movl %ecx, 24(%ebx)
+ js .LBB249_16
+# BB#15:
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 116(%esp) # 4-byte Spill
+.LBB249_16:
+ movl 116(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%ebx)
+ js .LBB249_18
+# BB#17:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB249_18:
+ movl %eax, 32(%ebx)
+ movl 96(%esp), %ecx # 4-byte Reload
+ js .LBB249_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 120(%esp) # 4-byte Spill
+.LBB249_20:
+ movl 120(%esp), %eax # 4-byte Reload
+ movl %eax, 36(%ebx)
+ js .LBB249_22
+# BB#21:
+ movl 36(%esp), %ebp # 4-byte Reload
+.LBB249_22:
+ movl %ebp, 40(%ebx)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB249_24
+# BB#23:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB249_24:
+ movl %eax, 44(%ebx)
+ movl 92(%esp), %eax # 4-byte Reload
+ js .LBB249_26
+# BB#25:
+ movl 44(%esp), %esi # 4-byte Reload
+.LBB249_26:
+ movl %esi, 48(%ebx)
+ js .LBB249_28
+# BB#27:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB249_28:
+ movl %eax, 52(%ebx)
+ js .LBB249_30
+# BB#29:
+ movl 52(%esp), %ecx # 4-byte Reload
+.LBB249_30:
+ movl %ecx, 56(%ebx)
+ js .LBB249_32
+# BB#31:
+ movl 56(%esp), %edi # 4-byte Reload
+.LBB249_32:
+ movl %edi, 60(%ebx)
+ addl $124, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end249:
+ .size mcl_fp_addNF16Lbmi2, .Lfunc_end249-mcl_fp_addNF16Lbmi2
+
+ .globl mcl_fp_sub16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub16Lbmi2,@function
+mcl_fp_sub16Lbmi2: # @mcl_fp_sub16Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 88(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ sbbl 32(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ sbbl 40(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 44(%esi), %edx
+ sbbl 44(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 48(%esi), %ecx
+ sbbl 48(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 52(%esi), %eax
+ sbbl 52(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 56(%esi), %ebp
+ sbbl 56(%edi), %ebp
+ movl 60(%esi), %esi
+ sbbl 60(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 80(%esp), %ebx
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 56(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%ebx)
+ movl %edx, 44(%ebx)
+ movl %ecx, 48(%ebx)
+ movl %eax, 52(%ebx)
+ movl %ebp, 56(%ebx)
+ movl %esi, 60(%ebx)
+ je .LBB250_2
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 92(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 8(%esi), %edi
+ movl 12(%esi), %eax
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl 44(%esi), %eax
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%ebx)
+ movl 48(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%ebx)
+ movl 52(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 48(%ebx)
+ movl %eax, 52(%ebx)
+ movl 56(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 56(%ebx)
+ movl 60(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%ebx)
+.LBB250_2: # %nocarry
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end250:
+ .size mcl_fp_sub16Lbmi2, .Lfunc_end250-mcl_fp_sub16Lbmi2
+
+ .globl mcl_fp_subNF16Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF16Lbmi2,@function
+mcl_fp_subNF16Lbmi2: # @mcl_fp_subNF16Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $104, %esp
+ movl 128(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 132(%esp), %edi
+ subl (%edi), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ sbbl 4(%edi), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 60(%ecx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 56(%ecx), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 52(%ecx), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 36(%ecx), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ sbbl 36(%edi), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 52(%edi), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 56(%edi), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ sbbl 60(%edi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ sarl $31, %eax
+ movl 136(%esp), %esi
+ movl 60(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 48(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 40(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 36(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 32(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 28(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 24(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 20(%esi), %ebp
+ andl %eax, %ebp
+ movl 16(%esi), %ebx
+ andl %eax, %ebx
+ movl 12(%esi), %edi
+ andl %eax, %edi
+ movl 8(%esi), %edx
+ andl %eax, %edx
+ movl 4(%esi), %ecx
+ andl %eax, %ecx
+ andl (%esi), %eax
+ addl 64(%esp), %eax # 4-byte Folded Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl 124(%esp), %esi
+ movl %eax, (%esi)
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 4(%esi)
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edx, 8(%esi)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 12(%esi)
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %ebx, 16(%esi)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 20(%esi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 24(%esi)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 28(%esi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%esi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%esi)
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%esi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 44(%esi)
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 92(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl %eax, 56(%esi)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esi)
+ addl $104, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end251:
+ .size mcl_fp_subNF16Lbmi2, .Lfunc_end251-mcl_fp_subNF16Lbmi2
+
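+# mcl_fpDbl_add16Lbmi2: double-width addition for a 16-limb (512-bit) field.
+# Adds the two 32-limb source operands, writes the low 16 limbs of the sum to
+# the destination, then conditionally subtracts the 16-limb modulus (fourth
+# argument) from the high half, keeping the reduced value when no borrow occurs.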
+ .globl mcl_fpDbl_add16Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add16Lbmi2,@function
+mcl_fpDbl_add16Lbmi2: # @mcl_fpDbl_add16Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $116, %esp
+ movl 144(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 140(%esp), %ebx
+ addl (%ebx), %esi
+ adcl 4(%ebx), %edx
+ movl 8(%ecx), %edi
+ adcl 8(%ebx), %edi
+ movl 12(%ebx), %ebp
+ movl 136(%esp), %eax
+ movl %esi, (%eax)
+ movl 16(%ebx), %esi
+ adcl 12(%ecx), %ebp
+ adcl 16(%ecx), %esi
+ movl %edx, 4(%eax)
+ movl 72(%ecx), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl %edi, 8(%eax)
+ movl 20(%ecx), %edx
+ movl %ebp, 12(%eax)
+ movl 20(%ebx), %edi
+ adcl %edx, %edi
+ movl 24(%ecx), %edx
+ movl %esi, 16(%eax)
+ movl 24(%ebx), %esi
+ adcl %edx, %esi
+ movl 28(%ecx), %edx
+ movl %edi, 20(%eax)
+ movl 28(%ebx), %edi
+ adcl %edx, %edi
+ movl 32(%ecx), %edx
+ movl %esi, 24(%eax)
+ movl 32(%ebx), %esi
+ adcl %edx, %esi
+ movl 36(%ecx), %edx
+ movl %edi, 28(%eax)
+ movl 36(%ebx), %edi
+ adcl %edx, %edi
+ movl 40(%ecx), %edx
+ movl %esi, 32(%eax)
+ movl 40(%ebx), %esi
+ adcl %edx, %esi
+ movl 44(%ecx), %edx
+ movl %edi, 36(%eax)
+ movl 44(%ebx), %edi
+ adcl %edx, %edi
+ movl 48(%ecx), %edx
+ movl %esi, 40(%eax)
+ movl 48(%ebx), %esi
+ adcl %edx, %esi
+ movl 52(%ecx), %edx
+ movl %edi, 44(%eax)
+ movl 52(%ebx), %edi
+ adcl %edx, %edi
+ movl 56(%ecx), %edx
+ movl %esi, 48(%eax)
+ movl 56(%ebx), %esi
+ adcl %edx, %esi
+ movl 60(%ecx), %edx
+ movl %edi, 52(%eax)
+ movl 60(%ebx), %ebp
+ adcl %edx, %ebp
+ movl 64(%ecx), %edx
+ movl %esi, 56(%eax)
+ movl 64(%ebx), %esi
+ adcl %edx, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%ecx), %edx
+ movl %ebp, 60(%eax)
+ movl 68(%ebx), %eax
+ adcl %edx, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 72(%ebx), %eax
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%ecx), %ebp
+ movl 76(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%ecx), %ebp
+ movl 80(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 84(%ecx), %ebp
+ movl 84(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 88(%ecx), %ebp
+ movl 88(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%ecx), %ebp
+ movl 92(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%ecx), %ebp
+ movl 96(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 100(%ecx), %ebp
+ movl 100(%ebx), %edx
+ adcl %ebp, %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 104(%ecx), %ebp
+ movl 104(%ebx), %edx
+ adcl %ebp, %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 108(%ecx), %ebp
+ movl 108(%ebx), %edx
+ adcl %ebp, %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 112(%ecx), %edx
+ movl 112(%ebx), %ebp
+ adcl %edx, %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 116(%ecx), %edx
+ movl 116(%ebx), %esi
+ adcl %edx, %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 120(%ecx), %edx
+ movl 120(%ebx), %edi
+ adcl %edx, %edi
+ movl 124(%ecx), %ecx
+ movl 124(%ebx), %esi
+ adcl %ecx, %esi
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 148(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ subl (%edx), %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl 76(%esp), %ebx # 4-byte Reload
+ sbbl 4(%edx), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ sbbl 8(%edx), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx # 4-byte Reload
+ sbbl 12(%edx), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 104(%esp), %ebx # 4-byte Reload
+ sbbl 16(%edx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 20(%edx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 108(%esp), %ebx # 4-byte Reload
+ sbbl 24(%edx), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 92(%esp), %ebx # 4-byte Reload
+ sbbl 28(%edx), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ sbbl 32(%edx), %ebx
+ movl 112(%esp), %eax # 4-byte Reload
+ sbbl 36(%edx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ sbbl 40(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 44(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl 48(%edx), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ movl %eax, %ebp
+ sbbl 52(%edx), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ sbbl 56(%edx), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ sbbl 60(%edx), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB252_2
+# BB#1:
+ movl %ebx, 64(%esp) # 4-byte Spill
+.LBB252_2:
+ testb %cl, %cl
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB252_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB252_4:
+ movl 136(%esp), %ebx
+ movl %ecx, 64(%ebx)
+ movl %esi, %ebp
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ movl 92(%esp), %ecx # 4-byte Reload
+ movl 88(%esp), %edx # 4-byte Reload
+ movl 76(%esp), %esi # 4-byte Reload
+ jne .LBB252_6
+# BB#5:
+ movl 4(%esp), %esi # 4-byte Reload
+.LBB252_6:
+ movl %esi, 68(%ebx)
+ movl 84(%esp), %esi # 4-byte Reload
+ movl 80(%esp), %eax # 4-byte Reload
+ jne .LBB252_8
+# BB#7:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB252_8:
+ movl %eax, 72(%ebx)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB252_10
+# BB#9:
+ movl 12(%esp), %esi # 4-byte Reload
+.LBB252_10:
+ movl %esi, 76(%ebx)
+ jne .LBB252_12
+# BB#11:
+ movl 16(%esp), %esi # 4-byte Reload
+ movl %esi, 104(%esp) # 4-byte Spill
+.LBB252_12:
+ movl 104(%esp), %esi # 4-byte Reload
+ movl %esi, 80(%ebx)
+ jne .LBB252_14
+# BB#13:
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB252_14:
+ movl %edx, 84(%ebx)
+ jne .LBB252_16
+# BB#15:
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 108(%esp) # 4-byte Spill
+.LBB252_16:
+ movl 108(%esp), %edx # 4-byte Reload
+ movl %edx, 88(%ebx)
+ jne .LBB252_18
+# BB#17:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB252_18:
+ movl %ecx, 92(%ebx)
+ movl 64(%esp), %ecx # 4-byte Reload
+ movl %ecx, 96(%ebx)
+ jne .LBB252_20
+# BB#19:
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 112(%esp) # 4-byte Spill
+.LBB252_20:
+ movl 112(%esp), %ecx # 4-byte Reload
+ movl %ecx, 100(%ebx)
+ jne .LBB252_22
+# BB#21:
+ movl 36(%esp), %edi # 4-byte Reload
+.LBB252_22:
+ movl %edi, 104(%ebx)
+ movl 100(%esp), %ecx # 4-byte Reload
+ jne .LBB252_24
+# BB#23:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB252_24:
+ movl %ecx, 108(%ebx)
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB252_26
+# BB#25:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB252_26:
+ movl %eax, 112(%ebx)
+ movl 68(%esp), %eax # 4-byte Reload
+ jne .LBB252_28
+# BB#27:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB252_28:
+ movl %eax, 116(%ebx)
+ jne .LBB252_30
+# BB#29:
+ movl 52(%esp), %ecx # 4-byte Reload
+.LBB252_30:
+ movl %ecx, 120(%ebx)
+ jne .LBB252_32
+# BB#31:
+ movl 56(%esp), %ebp # 4-byte Reload
+.LBB252_32:
+ movl %ebp, 124(%ebx)
+ addl $116, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end252:
+ .size mcl_fpDbl_add16Lbmi2, .Lfunc_end252-mcl_fpDbl_add16Lbmi2
+
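+# mcl_fpDbl_sub16Lbmi2: double-width subtraction for a 16-limb field.
+# Subtracts the second 32-limb operand from the first, writes the low 16 limbs
+# to the destination, and corrects the high half by adding the modulus masked
+# by the final borrow before storing it.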
+ .globl mcl_fpDbl_sub16Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub16Lbmi2,@function
+mcl_fpDbl_sub16Lbmi2: # @mcl_fpDbl_sub16Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+ movl 132(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edi
+ movl 136(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%eax), %ebx
+ sbbl 8(%edx), %ebx
+ movl 128(%esp), %ecx
+ movl %esi, (%ecx)
+ movl 12(%eax), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ecx)
+ movl 16(%eax), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ecx)
+ movl 24(%eax), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ecx)
+ movl 32(%eax), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ sbbl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %edi, 32(%ecx)
+ movl 40(%eax), %edi
+ sbbl %ebx, %edi
+ movl 44(%edx), %ebx
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ sbbl %ebx, %esi
+ movl 48(%edx), %ebx
+ movl %edi, 40(%ecx)
+ movl 48(%eax), %edi
+ sbbl %ebx, %edi
+ movl 52(%edx), %ebx
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %esi
+ sbbl %ebx, %esi
+ movl 56(%edx), %ebx
+ movl %edi, 48(%ecx)
+ movl 56(%eax), %edi
+ sbbl %ebx, %edi
+ movl 60(%edx), %ebx
+ movl %esi, 52(%ecx)
+ movl 60(%eax), %esi
+ sbbl %ebx, %esi
+ movl 64(%edx), %ebx
+ movl %edi, 56(%ecx)
+ movl 64(%eax), %edi
+ sbbl %ebx, %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 68(%edx), %edi
+ movl %esi, 60(%ecx)
+ movl 68(%eax), %esi
+ sbbl %edi, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 72(%edx), %esi
+ movl 72(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 76(%edx), %esi
+ movl 76(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 80(%edx), %esi
+ movl 80(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 84(%edx), %esi
+ movl 84(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 88(%edx), %esi
+ movl 88(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 92(%edx), %esi
+ movl 92(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 96(%edx), %esi
+ movl 96(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 100(%edx), %esi
+ movl 100(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 104(%edx), %esi
+ movl 104(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 108(%edx), %esi
+ movl 108(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 112(%edx), %esi
+ movl 112(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 116(%edx), %esi
+ movl 116(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 120(%edx), %esi
+ movl 120(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 124(%edx), %edx
+ movl 124(%eax), %eax
+ sbbl %edx, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 140(%esp), %ebx
+ jne .LBB253_1
+# BB#2:
+ movl $0, 68(%esp) # 4-byte Folded Spill
+ jmp .LBB253_3
+.LBB253_1:
+ movl 60(%ebx), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+.LBB253_3:
+ testb %al, %al
+ jne .LBB253_4
+# BB#5:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ movl $0, %ebp
+ jmp .LBB253_6
+.LBB253_4:
+ movl (%ebx), %ebp
+ movl 4(%ebx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB253_6:
+ jne .LBB253_7
+# BB#8:
+ movl $0, 36(%esp) # 4-byte Folded Spill
+ jmp .LBB253_9
+.LBB253_7:
+ movl 56(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+.LBB253_9:
+ jne .LBB253_10
+# BB#11:
+ movl $0, 32(%esp) # 4-byte Folded Spill
+ jmp .LBB253_12
+.LBB253_10:
+ movl 52(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+.LBB253_12:
+ jne .LBB253_13
+# BB#14:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ jmp .LBB253_15
+.LBB253_13:
+ movl 48(%ebx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+.LBB253_15:
+ jne .LBB253_16
+# BB#17:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB253_18
+.LBB253_16:
+ movl 44(%ebx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB253_18:
+ jne .LBB253_19
+# BB#20:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB253_21
+.LBB253_19:
+ movl 40(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB253_21:
+ jne .LBB253_22
+# BB#23:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB253_24
+.LBB253_22:
+ movl 36(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB253_24:
+ jne .LBB253_25
+# BB#26:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB253_27
+.LBB253_25:
+ movl 32(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB253_27:
+ jne .LBB253_28
+# BB#29:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB253_30
+.LBB253_28:
+ movl 28(%ebx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB253_30:
+ jne .LBB253_31
+# BB#32:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB253_33
+.LBB253_31:
+ movl 24(%ebx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB253_33:
+ jne .LBB253_34
+# BB#35:
+ movl $0, %esi
+ jmp .LBB253_36
+.LBB253_34:
+ movl 20(%ebx), %esi
+.LBB253_36:
+ jne .LBB253_37
+# BB#38:
+ movl $0, %edx
+ jmp .LBB253_39
+.LBB253_37:
+ movl 16(%ebx), %edx
+.LBB253_39:
+ jne .LBB253_40
+# BB#41:
+ movl $0, %edi
+ jmp .LBB253_42
+.LBB253_40:
+ movl 12(%ebx), %edi
+.LBB253_42:
+ jne .LBB253_43
+# BB#44:
+ xorl %ebx, %ebx
+ jmp .LBB253_45
+.LBB253_43:
+ movl 8(%ebx), %ebx
+.LBB253_45:
+ addl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %eax
+ movl 24(%esp), %ebp # 4-byte Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 68(%ecx)
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 72(%ecx)
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 76(%ecx)
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 80(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 84(%ecx)
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 88(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 96(%ecx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 100(%ecx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 104(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 108(%ecx)
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 112(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 116(%ecx)
+ movl %eax, 120(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 124(%ecx)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end253:
+ .size mcl_fpDbl_sub16Lbmi2, .Lfunc_end253-mcl_fpDbl_sub16Lbmi2
+
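+# .LmulPv544x32: local helper that multiplies the 17-limb (544-bit) operand
+# pointed to by %edx with the 32-bit scalar passed on the stack, using BMI2
+# mulx plus an adcl carry chain, and stores the 18-limb product at the buffer
+# addressed by %ecx (also returned in %eax).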
+ .align 16, 0x90
+ .type .LmulPv544x32,@function
+.LmulPv544x32: # @mulPv544x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl %edx, %eax
+ movl 80(%esp), %esi
+ movl %esi, %edx
+ mulxl 4(%eax), %edi, %ebx
+ movl %esi, %edx
+ mulxl (%eax), %ebp, %edx
+ movl %ebp, 56(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 8(%eax), %edx, %edi
+ adcl %ebx, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 12(%eax), %edx, %ebx
+ adcl %edi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 16(%eax), %edx, %edi
+ adcl %ebx, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 20(%eax), %edx, %ebx
+ adcl %edi, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 24(%eax), %edx, %edi
+ adcl %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 28(%eax), %edx, %ebx
+ adcl %edi, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 32(%eax), %edx, %edi
+ adcl %ebx, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 36(%eax), %edx, %ebx
+ adcl %edi, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 40(%eax), %edx, %edi
+ adcl %ebx, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 44(%eax), %edx, %ebx
+ adcl %edi, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 48(%eax), %edx, %edi
+ adcl %ebx, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %esi, %edx
+ mulxl 52(%eax), %ebx, %ebp
+ adcl %edi, %ebx
+ movl %esi, %edx
+ mulxl 56(%eax), %edi, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ adcl %ebp, %edi
+ movl %esi, %edx
+ mulxl 60(%eax), %edx, %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ecx)
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ecx)
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%ecx)
+ movl 44(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%ecx)
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%ecx)
+ movl 36(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%ecx)
+ movl 32(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%ecx)
+ movl 28(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%ecx)
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%ecx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%ecx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%ecx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%ecx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%ecx)
+ movl %ebx, 52(%ecx)
+ movl %edi, 56(%ecx)
+ movl %edx, 60(%ecx)
+ movl %esi, %edx
+ mulxl 64(%eax), %eax, %edx
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ adcl $0, %edx
+ movl %edx, 68(%ecx)
+ movl %ecx, %eax
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end254:
+ .size .LmulPv544x32, .Lfunc_end254-.LmulPv544x32
+
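+# mcl_fp_mulUnitPre17Lbmi2: multiplies a 17-limb operand by a 32-bit unit via
+# .LmulPv544x32 and copies the 18-limb result into the destination buffer.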
+ .globl mcl_fp_mulUnitPre17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre17Lbmi2,@function
+mcl_fp_mulUnitPre17Lbmi2: # @mcl_fp_mulUnitPre17Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $140, %esp
+ calll .L255$pb
+.L255$pb:
+ popl %ebx
+.Ltmp56:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp56-.L255$pb), %ebx
+ movl 168(%esp), %eax
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 164(%esp), %edx
+ calll .LmulPv544x32
+ movl 132(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 108(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 104(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp
+ movl 80(%esp), %ebx
+ movl 76(%esp), %edi
+ movl 72(%esp), %esi
+ movl 64(%esp), %edx
+ movl 68(%esp), %ecx
+ movl 160(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, 64(%eax)
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl %ecx, 68(%eax)
+ addl $140, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end255:
+ .size mcl_fp_mulUnitPre17Lbmi2, .Lfunc_end255-mcl_fp_mulUnitPre17Lbmi2
+
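+# mcl_fpDbl_mulPre17Lbmi2: schoolbook multiplication of two 17-limb operands.
+# Each 32-bit limb of the second operand is multiplied against the first with
+# .LmulPv544x32, and the partial products are accumulated into the 34-limb
+# double-width result with adcl carry chains.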
+ .globl mcl_fpDbl_mulPre17Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre17Lbmi2,@function
+mcl_fpDbl_mulPre17Lbmi2: # @mcl_fpDbl_mulPre17Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1356, %esp # imm = 0x54C
+ calll .L256$pb
+.L256$pb:
+ popl %edi
+.Ltmp57:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp57-.L256$pb), %edi
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 1384(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1280(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl %edx, %esi
+ movl %edi, %ebx
+ calll .LmulPv544x32
+ movl 1348(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1344(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1340(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1332(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1328(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1320(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1316(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1312(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1308(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1304(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1300(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1296(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 1292(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 1288(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 1280(%esp), %eax
+ movl 1284(%esp), %ebp
+ movl 1376(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 1384(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl %esi, %edx
+ movl %edi, %ebx
+ calll .LmulPv544x32
+ addl 1208(%esp), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 1276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1272(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1268(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1260(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1256(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 1252(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1248(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1244(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1236(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1232(%esp), %edi
+ movl 1228(%esp), %esi
+ movl 1224(%esp), %edx
+ movl 1220(%esp), %ecx
+ movl 1212(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1216(%esp), %eax
+ movl 1376(%esp), %ebp
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ movl 12(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 120(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 64(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1136(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 1136(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1204(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1200(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1196(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1192(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1188(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1180(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1176(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1168(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1164(%esp), %ebx
+ movl 1160(%esp), %edi
+ movl 1156(%esp), %esi
+ movl 1152(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1148(%esp), %edx
+ movl 1140(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1144(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1064(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1064(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 1132(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1128(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1124(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1120(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1116(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1108(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1100(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1096(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1092(%esp), %ebx
+ movl 1088(%esp), %edi
+ movl 1084(%esp), %esi
+ movl 1080(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1076(%esp), %edx
+ movl 1068(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1072(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 992(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1060(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1056(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1052(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1044(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1040(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1036(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1032(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1028(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1024(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1020(%esp), %ebx
+ movl 1016(%esp), %edi
+ movl 1012(%esp), %esi
+ movl 1008(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1004(%esp), %edx
+ movl 996(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1000(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 920(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 920(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 988(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 984(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 980(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 976(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 972(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 968(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 964(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 956(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 952(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 948(%esp), %ebx
+ movl 944(%esp), %edi
+ movl 940(%esp), %esi
+ movl 936(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 932(%esp), %edx
+ movl 924(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 928(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 848(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 848(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 916(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 912(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 908(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 904(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 900(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 896(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 892(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 888(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 884(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 880(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 876(%esp), %ebx
+ movl 872(%esp), %edi
+ movl 868(%esp), %esi
+ movl 864(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 860(%esp), %edx
+ movl 852(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 856(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 776(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 776(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 844(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 840(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 836(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 828(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 804(%esp), %ebx
+ movl 800(%esp), %edi
+ movl 796(%esp), %esi
+ movl 792(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 788(%esp), %edx
+ movl 780(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 704(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 772(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 764(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 760(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 748(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 744(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 740(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 736(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 732(%esp), %ebx
+ movl 728(%esp), %edi
+ movl 724(%esp), %esi
+ movl 720(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 716(%esp), %edx
+ movl 708(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 712(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 632(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 700(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 696(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 692(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 688(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 684(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 680(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 676(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 672(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 660(%esp), %ebx
+ movl 656(%esp), %edi
+ movl 652(%esp), %esi
+ movl 648(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 644(%esp), %edx
+ movl 636(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 640(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 560(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 628(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 624(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 620(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 616(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 588(%esp), %ebx
+ movl 584(%esp), %edi
+ movl 580(%esp), %esi
+ movl 576(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 572(%esp), %edx
+ movl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 568(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 488(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 532(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 524(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 520(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 516(%esp), %ebx
+ movl 512(%esp), %edi
+ movl 508(%esp), %esi
+ movl 504(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 500(%esp), %edx
+ movl 492(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 496(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 416(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 472(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 468(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 464(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 460(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 444(%esp), %ebx
+ movl 440(%esp), %edi
+ movl 436(%esp), %esi
+ movl 432(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 428(%esp), %edx
+ movl 420(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 424(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 344(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 388(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 372(%esp), %ebx
+ movl 368(%esp), %edi
+ movl 364(%esp), %esi
+ movl 360(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 356(%esp), %edx
+ movl 348(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 352(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 52(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 1380(%esp), %eax
+ movl %eax, %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 272(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 320(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 316(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 300(%esp), %ebx
+ movl 296(%esp), %edi
+ movl 292(%esp), %edx
+ movl 288(%esp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 284(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl 280(%esp), %ecx
+ movl 120(%esp), %esi # 4-byte Reload
+ movl 1376(%esp), %ebp
+ movl %esi, 56(%ebp)
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebp
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %ecx
+ movl %ecx, %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 200(%esp), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 248(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 244(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 240(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 236(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 232(%esp), %edi
+ movl 228(%esp), %esi
+ movl 224(%esp), %edx
+ movl 220(%esp), %ecx
+ movl 216(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl 204(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 208(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 1376(%esp), %ebx
+ movl %ebp, 60(%ebx)
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 64(%eax), %eax
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 128(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 196(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 192(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 188(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 184(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 164(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 156(%esp), %ebx
+ movl 152(%esp), %edi
+ movl 148(%esp), %esi
+ movl 144(%esp), %edx
+ movl 140(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 64(%eax)
+ movl 64(%esp), %ebp # 4-byte Reload
+ movl %ebp, 68(%eax)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ movl %ebp, 72(%eax)
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 76(%eax)
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 80(%eax)
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 84(%eax)
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 88(%eax)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 92(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 96(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 100(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 104(%eax)
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 108(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 116(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 112(%eax)
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 116(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 120(%eax)
+ movl %ecx, 124(%eax)
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 128(%eax)
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 132(%eax)
+ addl $1356, %esp # imm = 0x54C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end256:
+ .size mcl_fpDbl_mulPre17Lbmi2, .Lfunc_end256-mcl_fpDbl_mulPre17Lbmi2
+
+ .globl mcl_fpDbl_sqrPre17Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre17Lbmi2,@function
+mcl_fpDbl_sqrPre17Lbmi2: # @mcl_fpDbl_sqrPre17Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1356, %esp # imm = 0x54C
+ calll .L257$pb
+.L257$pb:
+ popl %ebx
+.Ltmp58:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp58-.L257$pb), %ebx
+ movl %ebx, 124(%esp) # 4-byte Spill
+ movl 1380(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 1280(%esp), %ecx
+ movl %edx, %edi
+ movl %ebx, %esi
+ calll .LmulPv544x32
+ movl 1348(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1344(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1340(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1332(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1328(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1320(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1316(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1312(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1308(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1304(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1300(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1296(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 1292(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 1288(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 1280(%esp), %eax
+ movl 1284(%esp), %ebp
+ movl 1376(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl %esi, %ebx
+ calll .LmulPv544x32
+ addl 1208(%esp), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 1276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1272(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1268(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1260(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1256(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 1252(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1248(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1244(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1236(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1232(%esp), %edi
+ movl 1228(%esp), %esi
+ movl 1224(%esp), %edx
+ movl 1220(%esp), %ecx
+ movl 1212(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1216(%esp), %eax
+ movl 1376(%esp), %ebp
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ movl 12(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 120(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 64(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 1136(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 1136(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1204(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1200(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1196(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1192(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1188(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1180(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1176(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1168(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1164(%esp), %ebx
+ movl 1160(%esp), %edi
+ movl 1156(%esp), %esi
+ movl 1152(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1148(%esp), %edx
+ movl 1140(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1144(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 1064(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1064(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 1132(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1128(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1124(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1120(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1116(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1108(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1100(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1096(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1092(%esp), %ebx
+ movl 1088(%esp), %edi
+ movl 1084(%esp), %esi
+ movl 1080(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1076(%esp), %edx
+ movl 1068(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1072(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 992(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1060(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1056(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1052(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1044(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1040(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1036(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1032(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1028(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1024(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1020(%esp), %ebx
+ movl 1016(%esp), %edi
+ movl 1012(%esp), %esi
+ movl 1008(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1004(%esp), %edx
+ movl 996(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1000(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 920(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 920(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 988(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 984(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 980(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 976(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 972(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 968(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 964(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 956(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 952(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 948(%esp), %ebx
+ movl 944(%esp), %edi
+ movl 940(%esp), %esi
+ movl 936(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 932(%esp), %edx
+ movl 924(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 928(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 848(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 848(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 916(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 912(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 908(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 904(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 900(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 896(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 892(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 888(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 884(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 880(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 876(%esp), %ebx
+ movl 872(%esp), %edi
+ movl 868(%esp), %esi
+ movl 864(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 860(%esp), %edx
+ movl 852(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 856(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 776(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 776(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 844(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 840(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 836(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 828(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 804(%esp), %ebx
+ movl 800(%esp), %edi
+ movl 796(%esp), %esi
+ movl 792(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 788(%esp), %edx
+ movl 780(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 704(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 772(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 764(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 760(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 748(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 744(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 740(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 736(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 732(%esp), %ebx
+ movl 728(%esp), %edi
+ movl 724(%esp), %esi
+ movl 720(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 716(%esp), %edx
+ movl 708(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 712(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 632(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 700(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 696(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 692(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 688(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 684(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 680(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 676(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 672(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 660(%esp), %ebx
+ movl 656(%esp), %edi
+ movl 652(%esp), %esi
+ movl 648(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 644(%esp), %edx
+ movl 636(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 640(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 560(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 628(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 624(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 620(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 616(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 588(%esp), %ebx
+ movl 584(%esp), %edi
+ movl 580(%esp), %esi
+ movl 576(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 572(%esp), %edx
+ movl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 568(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 44(%edx), %eax
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 488(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 532(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 524(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 520(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 516(%esp), %ebx
+ movl 512(%esp), %edi
+ movl 508(%esp), %esi
+ movl 504(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 500(%esp), %edx
+ movl 492(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 496(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 48(%edx), %eax
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 416(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 472(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 468(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 464(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 460(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 444(%esp), %ebx
+ movl 440(%esp), %edi
+ movl 436(%esp), %esi
+ movl 432(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 428(%esp), %edx
+ movl 420(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 424(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 52(%edx), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 344(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 388(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 372(%esp), %ebx
+ movl 368(%esp), %edi
+ movl 364(%esp), %esi
+ movl 360(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 356(%esp), %edx
+ movl 348(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 352(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 52(%eax)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 56(%edx), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 272(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 320(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 316(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 300(%esp), %ebx
+ movl 296(%esp), %edi
+ movl 292(%esp), %edx
+ movl 288(%esp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 284(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl 280(%esp), %ecx
+ movl 120(%esp), %esi # 4-byte Reload
+ movl 1376(%esp), %ebp
+ movl %esi, 56(%ebp)
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebp
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 60(%edx), %eax
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 200(%esp), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 248(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 244(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 240(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 236(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 232(%esp), %edi
+ movl 228(%esp), %esi
+ movl 224(%esp), %edx
+ movl 220(%esp), %ecx
+ movl 216(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl 204(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 208(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 1376(%esp), %ebx
+ movl %ebp, 60(%ebx)
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 64(%edx), %eax
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 128(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 196(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 192(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 188(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 184(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 164(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 156(%esp), %ebx
+ movl 152(%esp), %edi
+ movl 148(%esp), %esi
+ movl 144(%esp), %edx
+ movl 140(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 64(%eax)
+ movl 68(%esp), %ebp # 4-byte Reload
+ movl %ebp, 68(%eax)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ movl %ebp, 72(%eax)
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 76(%eax)
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 80(%eax)
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 84(%eax)
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 88(%eax)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 92(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 96(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 100(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 104(%eax)
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 108(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 112(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 112(%eax)
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 116(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 120(%eax)
+ movl %ecx, 124(%eax)
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 128(%eax)
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 132(%eax)
+ addl $1356, %esp # imm = 0x54C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end257:
+ .size mcl_fpDbl_sqrPre17Lbmi2, .Lfunc_end257-mcl_fpDbl_sqrPre17Lbmi2
+
+ .globl mcl_fp_mont17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_mont17Lbmi2,@function
+mcl_fp_mont17Lbmi2: # @mcl_fp_mont17Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2588, %esp # imm = 0xA1C
+ calll .L258$pb
+.L258$pb:
+ popl %ebx
+.Ltmp59:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp59-.L258$pb), %ebx
+ movl 2620(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 2512(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 2512(%esp), %ebp
+ movl 2516(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 2580(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 2576(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 2572(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 2568(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 2564(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2560(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2556(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2552(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2548(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2544(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2540(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2536(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2532(%esp), %edi
+ movl 2528(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2524(%esp), %esi
+ movl 2520(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 2440(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ addl 2440(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2444(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 2452(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2456(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2460(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2464(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2468(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2472(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2480(%esp), %eax
+ movl %eax, %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2484(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2488(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2492(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2496(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2500(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 2504(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 2508(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl 2616(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 2368(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ addl 2368(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2372(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2376(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2380(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2384(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 2392(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2396(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2400(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 2404(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 2408(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2412(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2416(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2420(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2424(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 2428(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 2432(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 2436(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2296(%esp), %ecx
+ movl 2620(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ movl 116(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 2296(%esp), %ebp
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 2300(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 2304(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2308(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2312(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2316(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 2320(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2324(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2328(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2332(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 2336(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 2340(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2344(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 2348(%esp), %esi
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 2352(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 2356(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 2360(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 2364(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 2616(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 2224(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 2224(%esp), %ecx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2228(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2232(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2236(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2240(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2244(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2256(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2268(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 2272(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2276(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 2280(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 2284(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 2288(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl 2292(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2152(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 2152(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2156(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2164(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2168(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2172(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2176(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2180(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2184(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2188(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2192(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2196(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2200(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 2204(%esp), %ebp
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl 2208(%esp), %edi
+ movl 132(%esp), %esi # 4-byte Reload
+ adcl 2212(%esp), %esi
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2216(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2220(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 2080(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 2080(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2084(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2088(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2092(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2096(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2100(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2104(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2108(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2112(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2116(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2120(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2124(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 2128(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ adcl 2132(%esp), %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ adcl 2136(%esp), %esi
+ movl %esi, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2140(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2144(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 2148(%esp), %esi
+ sbbl %ebp, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2008(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl %ebp, %eax
+ andl $1, %eax
+ addl 2008(%esp), %edi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2012(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2016(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2020(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2024(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2028(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2032(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2036(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 2040(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 2044(%esp), %edi
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2048(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 2052(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 2056(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 2060(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 2064(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 2068(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 2072(%esp), %ebp
+ adcl 2076(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1936(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 1936(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1944(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1948(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1956(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1964(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1968(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1972(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1976(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1980(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1984(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1988(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1992(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 1996(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2000(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2004(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1864(%esp), %ecx
+ movl 2620(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1864(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1868(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1872(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1876(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1880(%esp), %edi
+ adcl 1884(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1888(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1892(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 1896(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1900(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1904(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1908(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1912(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl 1916(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1920(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1924(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1928(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1932(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1792(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1792(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1804(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1820(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1836(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl 1840(%esp), %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1844(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 1848(%esp), %edi
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1852(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1856(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1860(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1720(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1720(%esp), %ecx
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1724(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1728(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1732(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1736(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1740(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1744(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1748(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1752(%esp), %esi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1756(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1760(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1764(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1768(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1772(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ adcl 1776(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ adcl 1780(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1784(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 1788(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1648(%esp), %ecx
+ movl 2612(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ movl 92(%esp), %eax # 4-byte Reload
+ addl 1648(%esp), %eax
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1652(%esp), %edi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1656(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1660(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1664(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1668(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1672(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1676(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1680(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1684(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1688(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1692(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1696(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1700(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1704(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1708(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl 1712(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1716(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %eax, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1576(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1576(%esp), %ebp
+ adcl 1580(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1584(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 1588(%esp), %ebp
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1592(%esp), %edi
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1596(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1600(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1604(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1608(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1616(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1620(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1624(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1628(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1632(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1636(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1640(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1644(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1504(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1504(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1512(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl 1516(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 1520(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl 1540(%esp), %edi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1544(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1548(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1568(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1572(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1432(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 76(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1432(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1436(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1440(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1448(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1452(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1460(%esp), %ebp
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1464(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl 1468(%esp), %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1472(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %edi # 4-byte Reload
+ adcl 1476(%esp), %edi
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1480(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1484(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1488(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1492(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 1496(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1500(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 1360(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 1360(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1384(%esp), %ebp
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 1400(%esp), %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1408(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1288(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 68(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1288(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1296(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1300(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1304(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1308(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1312(%esp), %ebp
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1316(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1320(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1328(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 1336(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl 1340(%esp), %edi
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1344(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1348(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1352(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1356(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 1216(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 1216(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1236(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 1240(%esp), %ebp
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1244(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1264(%esp), %edi
+ movl %edi, 112(%esp) # 4-byte Spill
+ adcl 1268(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 1144(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1148(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1152(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1156(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 1160(%esp), %edi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1164(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 1168(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1172(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1176(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %esi # 4-byte Reload
+ adcl 1180(%esp), %esi
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1184(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1188(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1192(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1196(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1200(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1204(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1208(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1212(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 1072(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 1072(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 1080(%esp), %ebp
+ adcl 1084(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 1104(%esp), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1112(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1000(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 72(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1000(%esp), %edi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1004(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1008(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1012(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1016(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1020(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1024(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl 1028(%esp), %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1032(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 1036(%esp), %edi
+ adcl 1040(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1044(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1048(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1052(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 1056(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1060(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1064(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl 952(%esp), %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 960(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 980(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 984(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 856(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 856(%esp), %ebp
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 888(%esp), %ebp
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 896(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 912(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2616(%esp), %ecx
+ movl %ecx, %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 2612(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 784(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 812(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 820(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 828(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 712(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %esi # 4-byte Reload
+ adcl 728(%esp), %esi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 736(%esp), %ebp
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 756(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 104(%esp), %ecx # 4-byte Reload
+ addl 640(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 652(%esp), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 660(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 680(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 696(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 104(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 568(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 600(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 608(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 616(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 624(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 108(%esp), %ecx # 4-byte Reload
+ addl 496(%esp), %ecx
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 524(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 528(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 540(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 544(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 108(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 424(%esp), %esi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %esi # 4-byte Reload
+ adcl 440(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 456(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 472(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 480(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 124(%esp), %ecx # 4-byte Reload
+ addl 352(%esp), %ecx
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 364(%esp), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 372(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 404(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 280(%esp), %ebp
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %esi # 4-byte Reload
+ adcl 288(%esp), %esi
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 296(%esp), %ebp
+ adcl 300(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 308(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 64(%eax), %eax
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 128(%esp), %ecx # 4-byte Reload
+ addl 208(%esp), %ecx
+ adcl 212(%esp), %esi
+ movl %esi, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 220(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 224(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 232(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 2620(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ andl $1, %edi
+ addl 136(%esp), %esi
+ movl 116(%esp), %edx # 4-byte Reload
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 148(%esp), %edx
+ movl %edx, 116(%esp) # 4-byte Spill
+ adcl 152(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 164(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 168(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 172(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 176(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 180(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 184(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 188(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 192(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 196(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 200(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 204(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl $0, %edi
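+# The block below appears to perform the final conditional reduction: it
+# trial-subtracts the 17-word modulus (addressed via 2620(%esp)) from the
+# accumulated result, folds the borrow into %edi, and the branches that
+# follow keep either the subtracted or the original words before the
+# 68-byte result is stored through the destination pointer at 2608(%esp).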
+ movl 132(%esp), %ecx # 4-byte Reload
+ movl 2620(%esp), %ebx
+ subl (%ebx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 8(%ebx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ sbbl 12(%ebx), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %eax, %edx
+ sbbl 16(%ebx), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ sbbl 20(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ sbbl 24(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 28(%ebx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ sbbl 32(%ebx), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ sbbl 36(%ebx), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ sbbl 40(%ebx), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ sbbl 44(%ebx), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ sbbl 48(%ebx), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ sbbl 52(%ebx), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ sbbl 56(%ebx), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ movl 108(%esp), %ebx # 4-byte Reload
+ sbbl 60(%ebp), %ebx
+ movl 124(%esp), %esi # 4-byte Reload
+ sbbl 64(%ebp), %esi
+ movl %esi, %ebp
+ sbbl $0, %edi
+ andl $1, %edi
+ jne .LBB258_2
+# BB#1:
+ movl %ebx, 108(%esp) # 4-byte Spill
+.LBB258_2:
+ movl %edi, %ebx
+ testb %bl, %bl
+ movl 132(%esp), %ebx # 4-byte Reload
+ jne .LBB258_4
+# BB#3:
+ movl 12(%esp), %ebx # 4-byte Reload
+.LBB258_4:
+ movl 2608(%esp), %eax
+ movl %ebx, (%eax)
+ movl 120(%esp), %ebx # 4-byte Reload
+ jne .LBB258_6
+# BB#5:
+ movl 16(%esp), %ebx # 4-byte Reload
+.LBB258_6:
+ movl %ebx, 4(%eax)
+ jne .LBB258_8
+# BB#7:
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 116(%esp) # 4-byte Spill
+.LBB258_8:
+ movl 116(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ jne .LBB258_10
+# BB#9:
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 100(%esp) # 4-byte Spill
+.LBB258_10:
+ movl 100(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 112(%esp), %esi # 4-byte Reload
+ jne .LBB258_12
+# BB#11:
+ movl 28(%esp), %esi # 4-byte Reload
+.LBB258_12:
+ movl %esi, 16(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ jne .LBB258_14
+# BB#13:
+ movl 32(%esp), %edx # 4-byte Reload
+.LBB258_14:
+ movl %edx, 20(%eax)
+ jne .LBB258_16
+# BB#15:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB258_16:
+ movl %ecx, 24(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ jne .LBB258_18
+# BB#17:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB258_18:
+ movl %ecx, 28(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ jne .LBB258_20
+# BB#19:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB258_20:
+ movl %ecx, 32(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB258_22
+# BB#21:
+ movl 48(%esp), %ecx # 4-byte Reload
+.LBB258_22:
+ movl %ecx, 36(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB258_24
+# BB#23:
+ movl 52(%esp), %ecx # 4-byte Reload
+.LBB258_24:
+ movl %ecx, 40(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB258_26
+# BB#25:
+ movl 56(%esp), %ecx # 4-byte Reload
+.LBB258_26:
+ movl %ecx, 44(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ jne .LBB258_28
+# BB#27:
+ movl 60(%esp), %ecx # 4-byte Reload
+.LBB258_28:
+ movl %ecx, 48(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ jne .LBB258_30
+# BB#29:
+ movl 88(%esp), %ecx # 4-byte Reload
+.LBB258_30:
+ movl %ecx, 52(%eax)
+ movl 104(%esp), %ecx # 4-byte Reload
+ jne .LBB258_32
+# BB#31:
+ movl 128(%esp), %ecx # 4-byte Reload
+.LBB258_32:
+ movl %ecx, 56(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl 124(%esp), %ecx # 4-byte Reload
+ jne .LBB258_34
+# BB#33:
+ movl %ebp, %ecx
+.LBB258_34:
+ movl %ecx, 64(%eax)
+ addl $2588, %esp # imm = 0xA1C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end258:
+ .size mcl_fp_mont17Lbmi2, .Lfunc_end258-mcl_fp_mont17Lbmi2
+
+ .globl mcl_fp_montNF17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montNF17Lbmi2,@function
+mcl_fp_montNF17Lbmi2: # @mcl_fp_montNF17Lbmi2
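+# 17-limb (17 x 32-bit = 544-bit) Montgomery multiplication, BMI2 build.
+# The NF suffix marks mcl's variant that differs from mcl_fp_mont17Lbmi2
+# above in how the final reduction is handled. As used below, the incoming
+# stack arguments after the prologue sit at 2596(%esp) (first operand),
+# 2600(%esp) (second operand, consumed one 32-bit word per round) and
+# 2604(%esp) (the modulus).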
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2572, %esp # imm = 0xA0C
+ calll .L259$pb
+.L259$pb:
+ popl %ebx
+.Ltmp60:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp60-.L259$pb), %ebx
+ movl 2604(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
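+# The word loaded from -4(p) and spilled to 48(%esp) is reused by every
+# "imull 48(%esp)" below to form the per-round reduction multiplier; it is
+# most likely the Montgomery constant -p^{-1} mod 2^32 that mcl keeps just
+# before the modulus.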
+ movl 2600(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 2496(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 2496(%esp), %edi
+ movl 2500(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 2564(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 2560(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2556(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 2552(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2548(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2544(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2540(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2536(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2532(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 2528(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 2524(%esp), %ebp
+ movl 2520(%esp), %esi
+ movl 2516(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2512(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2508(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2504(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 2424(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 2424(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2428(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2432(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2436(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2440(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2444(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 2448(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 2452(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 2456(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2460(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2464(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2468(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 2472(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2476(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 2480(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2484(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2488(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2492(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
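+# Each remaining round follows the same shape: fetch the next 32-bit word
+# of the operand at 2600(%esp), call .LmulPv544x32 to multiply-accumulate
+# it against the first operand, then multiply the low accumulator word by
+# 48(%esp) and call .LmulPv544x32 again with the modulus at 2604(%esp) to
+# fold in one Montgomery reduction step.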
+ movl 2600(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 2352(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 2420(%esp), %ecx
+ movl 112(%esp), %edx # 4-byte Reload
+ addl 2352(%esp), %edx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2356(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2360(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2368(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2372(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2376(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 2380(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2384(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2388(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2392(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 2396(%esp), %esi
+ movl %esi, %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2400(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2404(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2408(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2412(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2416(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2280(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 2280(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2284(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2288(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2292(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2296(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2300(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2304(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2308(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2312(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 2316(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2320(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 2324(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2332(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2336(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 2340(%esp), %ebp
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 2344(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2348(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 2208(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 2276(%esp), %eax
+ movl 92(%esp), %edx # 4-byte Reload
+ addl 2208(%esp), %edx
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2212(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2216(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 2220(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2224(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 2228(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 2232(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 2236(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 2240(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2244(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2248(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2252(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 2256(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 2260(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 2264(%esp), %ebp
+ adcl 2268(%esp), %edi
+ movl %edi, %esi
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 2272(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2136(%esp), %ecx
+ movl 2604(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ addl 2136(%esp), %edi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2140(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2144(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2148(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2152(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2156(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2160(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2164(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2168(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2172(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2176(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2180(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2184(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 2188(%esp), %edi
+ adcl 2192(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ adcl 2196(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 2200(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2204(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 2064(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 2132(%esp), %eax
+ movl 104(%esp), %edx # 4-byte Reload
+ addl 2064(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2068(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 2072(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2076(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 2080(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 2084(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 2088(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2092(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2096(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2100(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2104(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 2108(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 2112(%esp), %edi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 2116(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 2120(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 2124(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2128(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1992(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1992(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1996(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2000(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2004(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2008(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2012(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 2016(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2020(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2024(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2028(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2032(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2036(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 2040(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 2044(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2048(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2052(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 2056(%esp), %edi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2060(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1920(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1988(%esp), %eax
+ movl 76(%esp), %edx # 4-byte Reload
+ addl 1920(%esp), %edx
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1924(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1928(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1932(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1936(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1940(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1944(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1948(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1952(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1956(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1960(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1964(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 1968(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1972(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 1976(%esp), %esi
+ adcl 1980(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1984(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1848(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1848(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1852(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1856(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1860(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1864(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1868(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1872(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1876(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1880(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1884(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1888(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1892(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1896(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1900(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 1904(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1908(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1912(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1916(%esp), %eax
+ movl %eax, %edi
+ movl 2600(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1776(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1844(%esp), %eax
+ movl 84(%esp), %edx # 4-byte Reload
+ addl 1776(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1780(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1784(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1788(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1792(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1796(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1800(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1804(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1808(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1812(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1816(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1820(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1824(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1828(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl 1832(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1836(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1840(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1704(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1704(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1708(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1712(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1716(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1720(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1724(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1728(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1732(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1736(%esp), %esi
+ movl %esi, %ebp
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 1740(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1744(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1756(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1768(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1772(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1632(%esp), %ecx
+ movl 2596(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ movl 1700(%esp), %eax
+ movl 80(%esp), %edx # 4-byte Reload
+ addl 1632(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1636(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1640(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1644(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1648(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1652(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1656(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1660(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl 1664(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1668(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1672(%esp), %esi
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1676(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1680(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1684(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1688(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1692(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1696(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1560(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1560(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1564(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1568(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1576(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1580(%esp), %edi
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 1584(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1588(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1592(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1596(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1600(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1604(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 1608(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1616(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1620(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1624(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1628(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1488(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1556(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1488(%esp), %ecx
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1492(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1496(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 1500(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 1504(%esp), %edi
+ adcl 1508(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1512(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 1516(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 1520(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1524(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %edx # 4-byte Reload
+ adcl 1528(%esp), %edx
+ movl %edx, 116(%esp) # 4-byte Spill
+ adcl 1532(%esp), %esi
+ movl %esi, %ebp
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1536(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1540(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1544(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1548(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1552(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1416(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1416(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1428(%esp), %esi
+ adcl 1432(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1436(%esp), %edi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1444(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1448(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 1460(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1472(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1484(%esp), %ebp
+ movl 2600(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 1344(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1412(%esp), %eax
+ movl 52(%esp), %edx # 4-byte Reload
+ addl 1344(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1348(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 1352(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1356(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1360(%esp), %edi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1364(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1368(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1372(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1376(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1380(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1384(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1388(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1392(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1396(%esp), %esi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1400(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1404(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 1408(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1272(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1272(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1288(%esp), %edi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1304(%esp), %ebp
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1312(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1320(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1324(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 1200(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1268(%esp), %eax
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 1200(%esp), %ecx
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 1204(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 1208(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 1212(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1216(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 1220(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1224(%esp), %esi
+ adcl 1228(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 1232(%esp), %edi
+ movl 112(%esp), %edx # 4-byte Reload
+ adcl 1236(%esp), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1240(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1244(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1248(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1252(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1256(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 1260(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1264(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1128(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1128(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1152(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1160(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 1172(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1180(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1188(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 1056(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1124(%esp), %edx
+ movl 68(%esp), %eax # 4-byte Reload
+ addl 1056(%esp), %eax
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1060(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1064(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1072(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1076(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1084(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1088(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1092(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1096(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1100(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 1104(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1108(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 1112(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 1116(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1120(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 984(%esp), %esi
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 988(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 996(%esp), %ebp
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1044(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 912(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 980(%esp), %eax
+ addl 912(%esp), %esi
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 916(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 920(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 924(%esp), %edi
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 928(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 932(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %edx # 4-byte Reload
+ adcl 936(%esp), %edx
+ movl %edx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %edx # 4-byte Reload
+ adcl 940(%esp), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 944(%esp), %ebp
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 948(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 952(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 956(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 960(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 964(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 968(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 972(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 976(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 840(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 840(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 864(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 872(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 888(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 768(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 836(%esp), %edx
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 768(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 788(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl 792(%esp), %edi
+ adcl 796(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 812(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 828(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 696(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 716(%esp), %esi
+ adcl 720(%esp), %edi
+ movl %edi, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 732(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 756(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 692(%esp), %edx
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 624(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 636(%esp), %ebp
+ adcl 640(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 656(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 660(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 552(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 564(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 576(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 588(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 600(%esp), %ebp
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 604(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 548(%esp), %edx
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 480(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 500(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 524(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 528(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 408(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 420(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 432(%esp), %ebp
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 436(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 336(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 404(%esp), %edx
+ movl 108(%esp), %ecx # 4-byte Reload
+ addl 336(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 344(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 356(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ adcl 360(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 364(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 372(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 264(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 280(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 300(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 308(%esp), %edi
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 312(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 64(%eax), %eax
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 260(%esp), %edx
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 192(%esp), %ecx
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 204(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 232(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 236(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 120(%esp), %esi
+ movl 92(%esp), %esi # 4-byte Reload
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 128(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ adcl 132(%esp), %esi
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 148(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 152(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 156(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 160(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 164(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 168(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 172(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 176(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 180(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 184(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 188(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
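+# Final conditional reduction for mcl_fp_montNF17Lbmi2: the modulus at
+# 2604(%esp) is subtracted limb by limb into spill slots, and the sign of the
+# last sbbl result then selects, per limb, either the accumulated value or the
+# subtracted value before it is stored through the result pointer at 2592(%esp).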
+ movl %eax, %edx
+ movl 2604(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %ebp
+ movl %esi, %ebx
+ sbbl 8(%edi), %ebx
+ movl 104(%esp), %ecx # 4-byte Reload
+ sbbl 12(%edi), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 16(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ sbbl 20(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ sbbl 24(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 44(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 48(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 52(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ sbbl 56(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 60(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 64(%edi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ sarl $31, %eax
+ testl %eax, %eax
+ movl 116(%esp), %edi # 4-byte Reload
+ js .LBB259_2
+# BB#1:
+ movl %edx, %edi
+.LBB259_2:
+ movl 2592(%esp), %edx
+ movl %edi, (%edx)
+ movl 112(%esp), %edi # 4-byte Reload
+ js .LBB259_4
+# BB#3:
+ movl %ebp, %edi
+.LBB259_4:
+ movl %edi, 4(%edx)
+ js .LBB259_6
+# BB#5:
+ movl %ebx, %esi
+.LBB259_6:
+ movl %esi, 8(%edx)
+ movl 104(%esp), %esi # 4-byte Reload
+ js .LBB259_8
+# BB#7:
+ movl %ecx, %esi
+.LBB259_8:
+ movl %esi, 12(%edx)
+ movl 76(%esp), %ecx # 4-byte Reload
+ js .LBB259_10
+# BB#9:
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB259_10:
+ movl %ecx, 16(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB259_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB259_12:
+ movl %eax, 20(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB259_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB259_14:
+ movl %eax, 24(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB259_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB259_16:
+ movl %eax, 28(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB259_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB259_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB259_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB259_20:
+ movl %eax, 36(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ js .LBB259_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB259_22:
+ movl %eax, 40(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB259_24
+# BB#23:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB259_24:
+ movl %eax, 44(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB259_26
+# BB#25:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB259_26:
+ movl %eax, 48(%edx)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB259_28
+# BB#27:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB259_28:
+ movl %eax, 52(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB259_30
+# BB#29:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB259_30:
+ movl %eax, 56(%edx)
+ movl 108(%esp), %eax # 4-byte Reload
+ js .LBB259_32
+# BB#31:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB259_32:
+ movl %eax, 60(%edx)
+ movl 100(%esp), %eax # 4-byte Reload
+ js .LBB259_34
+# BB#33:
+ movl 92(%esp), %eax # 4-byte Reload
+.LBB259_34:
+ movl %eax, 64(%edx)
+ addl $2572, %esp # imm = 0xA0C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end259:
+ .size mcl_fp_montNF17Lbmi2, .Lfunc_end259-mcl_fp_montNF17Lbmi2
+
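+# mcl_fp_montRed17Lbmi2 performs Montgomery reduction of a 34-limb (1088-bit)
+# value against a 17-limb (544-bit) modulus. From the code below: 1456(%esp)
+# (within the function body) receives the reduced result, 1460(%esp) points at
+# the wide input, and 1464(%esp) points at the modulus; the word at offset -4
+# from the modulus pointer supplies the per-limb multiplier, which in mcl is
+# conventionally rp = -p^-1 mod 2^32.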
+ .globl mcl_fp_montRed17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_montRed17Lbmi2,@function
+mcl_fp_montRed17Lbmi2: # @mcl_fp_montRed17Lbmi2
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1436, %esp # imm = 0x59C
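+# i386 PIC idiom: the call/pop pair below materializes the address of .L260$pb,
+# and adding _GLOBAL_OFFSET_TABLE_ yields the GOT pointer, which is kept in a
+# spill slot and reloaded into %ebx before each call to .LmulPv544x32.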
+ calll .L260$pb
+.L260$pb:
+ popl %eax
+.Ltmp61:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp61-.L260$pb), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1464(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 1460(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 132(%ecx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 128(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 124(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 112(%ecx), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 108(%ecx), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 100(%ecx), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 92(%ecx), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl %esi, 180(%esp) # 4-byte Spill
+ movl 80(%ecx), %edi
+ movl %edi, 196(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl %esi, 192(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 204(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 64(%ecx), %ebp
+ movl %ebp, 176(%esp) # 4-byte Spill
+ movl 60(%ecx), %ebp
+ movl %ebp, 164(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 52(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 20(%ecx), %ebp
+ movl 16(%ecx), %esi
+ movl 12(%ecx), %edi
+ movl 8(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 64(%edx), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
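+# Reduction step: q (the lowest remaining limb times rp, computed into %ebx
+# above) is passed in the argument slot, and .LmulPv544x32 forms q times the
+# modulus; the 18-limb product is then folded into the running value with an
+# adcl chain. This block appears to be unrolled once per limb of the modulus.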
+ movl %ebx, (%esp)
+ leal 1360(%esp), %ecx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 76(%esp), %eax # 4-byte Reload
+ addl 1360(%esp), %eax
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1364(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1372(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 1376(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 1380(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ adcl $0, 204(%esp) # 4-byte Folded Spill
+ adcl $0, 192(%esp) # 4-byte Folded Spill
+ adcl $0, 196(%esp) # 4-byte Folded Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ movl 128(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1288(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 1288(%esp), %esi
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1292(%esp), %edx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1312(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1320(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1324(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 1336(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ adcl $0, 192(%esp) # 4-byte Folded Spill
+ adcl $0, 196(%esp) # 4-byte Folded Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, %esi
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 128(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1216(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1216(%esp), %ebp
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1244(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl 1260(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %edi # 4-byte Reload
+ adcl 1264(%esp), %edi
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ adcl $0, 196(%esp) # 4-byte Folded Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 184(%esp) # 4-byte Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1144(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1148(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl 1188(%esp), %edi
+ movl %edi, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1208(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ movl 188(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 144(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1072(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1072(%esp), %esi
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1076(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 188(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ movl 172(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ movl 152(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1000(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1000(%esp), %esi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1004(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 172(%esp) # 4-byte Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 152(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 928(%esp), %esi
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 932(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ movl 160(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 856(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 856(%esp), %esi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 860(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ adcl 924(%esp), %ebp
+ movl %ebp, 168(%esp) # 4-byte Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 160(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ movl 96(%esp), %ebp # 4-byte Reload
+ imull %ebp, %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 784(%esp), %edi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 788(%esp), %ecx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull %ebp, %eax
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 712(%esp), %esi
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 716(%esp), %ecx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %ebp # 4-byte Reload
+ adcl 760(%esp), %ebp
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl 780(%esp), %edi
+ movl %edi, 156(%esp) # 4-byte Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 640(%esp), %esi
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 644(%esp), %ecx
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %edi # 4-byte Reload
+ adcl 672(%esp), %edi
+ movl 192(%esp), %esi # 4-byte Reload
+ adcl 676(%esp), %esi
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl 684(%esp), %ebp
+ movl %ebp, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 1464(%esp), %eax
+ movl %eax, %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 568(%esp), %ebp
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 572(%esp), %ecx
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %ebp # 4-byte Reload
+ adcl 588(%esp), %ebp
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ adcl 596(%esp), %edi
+ movl %edi, 204(%esp) # 4-byte Spill
+ adcl 600(%esp), %esi
+ movl %esi, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %esi # 4-byte Reload
+ adcl 604(%esp), %esi
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 496(%esp), %edi
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 500(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ adcl 512(%esp), %ebp
+ movl %ebp, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %ebp # 4-byte Reload
+ adcl 516(%esp), %ebp
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ adcl 528(%esp), %esi
+ movl %esi, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %edi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 424(%esp), %edi
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 428(%esp), %ecx
+ movl 164(%esp), %edi # 4-byte Reload
+ adcl 432(%esp), %edi
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ adcl 440(%esp), %ebp
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 352(%esp), %esi
+ movl %edi, %ecx
+ adcl 356(%esp), %ecx
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ adcl 364(%esp), %ebp
+ movl %ebp, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %edi # 4-byte Reload
+ adcl 384(%esp), %edi
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 280(%esp), %ebp
+ movl 176(%esp), %ecx # 4-byte Reload
+ adcl 284(%esp), %ecx
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ adcl 308(%esp), %edi
+ movl %edi, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %ebp
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 208(%esp), %ebp
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %edx # 4-byte Reload
+ adcl 216(%esp), %edx
+ movl %edx, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %ecx # 4-byte Reload
+ adcl 220(%esp), %ecx
+ movl %ecx, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %ebx # 4-byte Reload
+ adcl 264(%esp), %ebx
+ movl %ebx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 276(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 100(%esp) # 4-byte Spill
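+# Final conditional subtraction: t minus the modulus is computed into spill
+# slots; the borrow, masked to a single bit in %ebx, then selects per limb
+# whether t or t minus p is stored through the result pointer at 1456(%esp).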
+ movl 200(%esp), %edi # 4-byte Reload
+ subl 16(%esp), %edi # 4-byte Folded Reload
+ sbbl 4(%esp), %edx # 4-byte Folded Reload
+ sbbl 8(%esp), %ecx # 4-byte Folded Reload
+ movl 196(%esp), %eax # 4-byte Reload
+ sbbl 12(%esp), %eax # 4-byte Folded Reload
+ sbbl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 184(%esp), %esi # 4-byte Reload
+ sbbl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 188(%esp), %esi # 4-byte Reload
+ sbbl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 168(%esp), %esi # 4-byte Reload
+ sbbl 32(%esp), %esi # 4-byte Folded Reload
+ movl 172(%esp), %ebp # 4-byte Reload
+ sbbl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 156(%esp), %ebp # 4-byte Reload
+ sbbl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 160(%esp), %ebp # 4-byte Reload
+ sbbl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 152(%esp), %ebp # 4-byte Reload
+ sbbl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 144(%esp), %ebp # 4-byte Reload
+ sbbl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 136(%esp) # 4-byte Spill
+ sbbl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 140(%esp) # 4-byte Spill
+ movl 132(%esp), %ebx # 4-byte Reload
+ sbbl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 148(%esp) # 4-byte Spill
+ movl 124(%esp), %ebx # 4-byte Reload
+ sbbl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 164(%esp) # 4-byte Spill
+ movl 116(%esp), %ebx # 4-byte Reload
+ sbbl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 176(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx # 4-byte Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB260_2
+# BB#1:
+ movl %esi, 168(%esp) # 4-byte Spill
+.LBB260_2:
+ testb %bl, %bl
+ movl 200(%esp), %esi # 4-byte Reload
+ jne .LBB260_4
+# BB#3:
+ movl %edi, %esi
+.LBB260_4:
+ movl 1456(%esp), %edi
+ movl %esi, (%edi)
+ movl 156(%esp), %esi # 4-byte Reload
+ movl 204(%esp), %ebx # 4-byte Reload
+ jne .LBB260_6
+# BB#5:
+ movl %edx, %ebx
+.LBB260_6:
+ movl %ebx, 4(%edi)
+ movl 144(%esp), %ebx # 4-byte Reload
+ movl 192(%esp), %edx # 4-byte Reload
+ jne .LBB260_8
+# BB#7:
+ movl %ecx, %edx
+.LBB260_8:
+ movl %edx, 8(%edi)
+ movl 132(%esp), %edx # 4-byte Reload
+ movl 196(%esp), %ecx # 4-byte Reload
+ jne .LBB260_10
+# BB#9:
+ movl %eax, %ecx
+.LBB260_10:
+ movl %ecx, 12(%edi)
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl 180(%esp), %eax # 4-byte Reload
+ jne .LBB260_12
+# BB#11:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB260_12:
+ movl %eax, 16(%edi)
+ movl 188(%esp), %eax # 4-byte Reload
+ movl 184(%esp), %ebp # 4-byte Reload
+ jne .LBB260_14
+# BB#13:
+ movl 92(%esp), %ebp # 4-byte Reload
+.LBB260_14:
+ movl %ebp, 20(%edi)
+ movl 152(%esp), %ebp # 4-byte Reload
+ jne .LBB260_16
+# BB#15:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB260_16:
+ movl %eax, 24(%edi)
+ movl 168(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%edi)
+ jne .LBB260_18
+# BB#17:
+ movl 104(%esp), %eax # 4-byte Reload
+ movl %eax, 172(%esp) # 4-byte Spill
+.LBB260_18:
+ movl 172(%esp), %eax # 4-byte Reload
+ movl %eax, 32(%edi)
+ jne .LBB260_20
+# BB#19:
+ movl 108(%esp), %esi # 4-byte Reload
+.LBB260_20:
+ movl %esi, 36(%edi)
+ jne .LBB260_22
+# BB#21:
+ movl 112(%esp), %eax # 4-byte Reload
+ movl %eax, 160(%esp) # 4-byte Spill
+.LBB260_22:
+ movl 160(%esp), %esi # 4-byte Reload
+ movl %esi, 40(%edi)
+ movl 128(%esp), %eax # 4-byte Reload
+ jne .LBB260_24
+# BB#23:
+ movl 120(%esp), %ebp # 4-byte Reload
+.LBB260_24:
+ movl %ebp, 44(%edi)
+ jne .LBB260_26
+# BB#25:
+ movl 136(%esp), %ebx # 4-byte Reload
+.LBB260_26:
+ movl %ebx, 48(%edi)
+ jne .LBB260_28
+# BB#27:
+ movl 140(%esp), %eax # 4-byte Reload
+.LBB260_28:
+ movl %eax, 52(%edi)
+ jne .LBB260_30
+# BB#29:
+ movl 148(%esp), %edx # 4-byte Reload
+.LBB260_30:
+ movl %edx, 56(%edi)
+ movl 116(%esp), %eax # 4-byte Reload
+ jne .LBB260_32
+# BB#31:
+ movl 164(%esp), %ecx # 4-byte Reload
+.LBB260_32:
+ movl %ecx, 60(%edi)
+ jne .LBB260_34
+# BB#33:
+ movl 176(%esp), %eax # 4-byte Reload
+.LBB260_34:
+ movl %eax, 64(%edi)
+ addl $1436, %esp # imm = 0x59C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end260:
+ .size mcl_fp_montRed17Lbmi2, .Lfunc_end260-mcl_fp_montRed17Lbmi2
+
+ .globl mcl_fp_addPre17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addPre17Lbmi2,@function
+mcl_fp_addPre17Lbmi2: # @mcl_fp_addPre17Lbmi2
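+# 17-limb (544-bit) add without reduction: dst = a + b, limb by limb with adcl;
+# the final carry (0 or 1) is returned in %eax.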
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %ebx
+ adcl 8(%ecx), %ebx
+ movl 16(%esp), %edi
+ movl %edx, (%edi)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%edi)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %ebx, 8(%edi)
+ movl 20(%eax), %ebx
+ movl %edx, 12(%edi)
+ movl 20(%ecx), %edx
+ adcl %ebx, %edx
+ movl 24(%eax), %ebx
+ movl %esi, 16(%edi)
+ movl 24(%ecx), %esi
+ adcl %ebx, %esi
+ movl 28(%eax), %ebx
+ movl %edx, 20(%edi)
+ movl 28(%ecx), %edx
+ adcl %ebx, %edx
+ movl 32(%eax), %ebx
+ movl %esi, 24(%edi)
+ movl 32(%ecx), %esi
+ adcl %ebx, %esi
+ movl 36(%eax), %ebx
+ movl %edx, 28(%edi)
+ movl 36(%ecx), %edx
+ adcl %ebx, %edx
+ movl 40(%eax), %ebx
+ movl %esi, 32(%edi)
+ movl 40(%ecx), %esi
+ adcl %ebx, %esi
+ movl 44(%eax), %ebx
+ movl %edx, 36(%edi)
+ movl 44(%ecx), %edx
+ adcl %ebx, %edx
+ movl 48(%eax), %ebx
+ movl %esi, 40(%edi)
+ movl 48(%ecx), %esi
+ adcl %ebx, %esi
+ movl 52(%eax), %ebx
+ movl %edx, 44(%edi)
+ movl 52(%ecx), %edx
+ adcl %ebx, %edx
+ movl 56(%eax), %ebx
+ movl %esi, 48(%edi)
+ movl 56(%ecx), %esi
+ adcl %ebx, %esi
+ movl 60(%eax), %ebx
+ movl %edx, 52(%edi)
+ movl 60(%ecx), %edx
+ adcl %ebx, %edx
+ movl %esi, 56(%edi)
+ movl %edx, 60(%edi)
+ movl 64(%eax), %eax
+ movl 64(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 64(%edi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end261:
+ .size mcl_fp_addPre17Lbmi2, .Lfunc_end261-mcl_fp_addPre17Lbmi2
+
+ .globl mcl_fp_subPre17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subPre17Lbmi2,@function
+mcl_fp_subPre17Lbmi2: # @mcl_fp_subPre17Lbmi2
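+# 17-limb subtract without reduction: dst = a - b via sbbl chains; the final
+# borrow (0 or 1) is returned in %eax.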
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebp
+ sbbl 8(%edx), %ebp
+ movl 20(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebx)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebp, 8(%ebx)
+ movl 20(%edx), %ebp
+ movl %esi, 12(%ebx)
+ movl 20(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 24(%edx), %ebp
+ movl %edi, 16(%ebx)
+ movl 24(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 28(%edx), %ebp
+ movl %esi, 20(%ebx)
+ movl 28(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 32(%edx), %ebp
+ movl %edi, 24(%ebx)
+ movl 32(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 36(%edx), %ebp
+ movl %esi, 28(%ebx)
+ movl 36(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 40(%edx), %ebp
+ movl %edi, 32(%ebx)
+ movl 40(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 44(%edx), %ebp
+ movl %esi, 36(%ebx)
+ movl 44(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 48(%edx), %ebp
+ movl %edi, 40(%ebx)
+ movl 48(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 52(%edx), %ebp
+ movl %esi, 44(%ebx)
+ movl 52(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 56(%edx), %ebp
+ movl %edi, 48(%ebx)
+ movl 56(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 60(%edx), %ebp
+ movl %esi, 52(%ebx)
+ movl 60(%ecx), %esi
+ sbbl %ebp, %esi
+ movl %edi, 56(%ebx)
+ movl %esi, 60(%ebx)
+ movl 64(%edx), %edx
+ movl 64(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 64(%ebx)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end262:
+ .size mcl_fp_subPre17Lbmi2, .Lfunc_end262-mcl_fp_subPre17Lbmi2
+
+ .globl mcl_fp_shr1_17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_shr1_17Lbmi2,@function
+mcl_fp_shr1_17Lbmi2: # @mcl_fp_shr1_17Lbmi2
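+# Logical right shift of a 17-limb value by one bit, using shrdl so each limb
+# receives the low bit of the limb above it.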
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 48(%ecx)
+ movl 56(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 52(%ecx)
+ movl 60(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 56(%ecx)
+ movl 64(%eax), %eax
+ shrdl $1, %eax, %esi
+ movl %esi, 60(%ecx)
+ shrl %eax
+ movl %eax, 64(%ecx)
+ popl %esi
+ retl
+.Lfunc_end263:
+ .size mcl_fp_shr1_17Lbmi2, .Lfunc_end263-mcl_fp_shr1_17Lbmi2
+
+ .globl mcl_fp_add17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_add17Lbmi2,@function
+mcl_fp_add17Lbmi2: # @mcl_fp_add17Lbmi2
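+# Modular add: the raw 17-limb sum is stored to dst, the modulus is then
+# subtracted, and the reduced value overwrites dst only when no borrow occurs
+# (%nocarry branch).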
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 88(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ movl 84(%esp), %edx
+ addl (%edx), %ecx
+ movl %ecx, %ebx
+ adcl 4(%edx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ adcl 8(%edx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl 16(%edx), %edi
+ adcl 12(%esi), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ adcl 20(%esi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ adcl 24(%esi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ adcl 28(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ adcl 32(%esi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 36(%edx), %eax
+ adcl 36(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%edx), %eax
+ adcl 40(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%edx), %eax
+ adcl 44(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 48(%edx), %eax
+ adcl 48(%esi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ adcl 52(%esi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 56(%edx), %eax
+ adcl 56(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 60(%edx), %ebp
+ adcl 60(%esi), %ebp
+ movl 64(%edx), %edx
+ adcl 64(%esi), %edx
+ movl 80(%esp), %esi
+ movl %ebx, (%esi)
+ movl %ebx, %eax
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%esi)
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%esi)
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%esi)
+ movl %edi, 16(%esi)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%esi)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%esi)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%esi)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%esi)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%esi)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%esi)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%esi)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 48(%esi)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 52(%esi)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 56(%esi)
+ movl %ebp, 60(%esi)
+ movl %edx, 64(%esi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 92(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ sbbl 8(%edi), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 12(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 4(%esp), %eax # 4-byte Reload
+ sbbl 16(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ sbbl 20(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 24(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ sbbl 44(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ sbbl 48(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 52(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ sbbl 56(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 60(%edi), %ebp
+ sbbl 64(%edi), %edx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB264_2
+# BB#1: # %nocarry
+ movl (%esp), %edi # 4-byte Reload
+ movl %edi, (%esi)
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%esi)
+ movl 56(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%esi)
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%esi)
+ movl 4(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%esi)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%esi)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%esi)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%esi)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%esi)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%esi)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%esi)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%esi)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 48(%esi)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%esi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%esi)
+ movl %ebp, 60(%esi)
+ movl %edx, 64(%esi)
+.LBB264_2: # %carry
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end264:
+ .size mcl_fp_add17Lbmi2, .Lfunc_end264-mcl_fp_add17Lbmi2
+
+ .globl mcl_fp_addNF17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_addNF17Lbmi2,@function
+mcl_fp_addNF17Lbmi2: # @mcl_fp_addNF17Lbmi2
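+# "NF" modular add: computes a + b and a + b - p, then selects per limb on the
+# sign of the subtraction (js) instead of a saved carry flag.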
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $132, %esp
+ movl 160(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 156(%esp), %esi
+ addl (%esi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 4(%esi), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 64(%eax), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 60(%eax), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 56(%eax), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 52(%eax), %ebp
+ movl 48(%eax), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 44(%eax), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 40(%eax), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 36(%eax), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 28(%eax), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 20(%eax), %ebx
+ movl 16(%eax), %edi
+ movl 12(%eax), %edx
+ movl 8(%eax), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 12(%esi), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 24(%esi), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 28(%esi), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 32(%esi), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esi), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 40(%esi), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esi), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 48(%esi), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 52(%esi), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 56(%esi), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 60(%esi), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 64(%esi), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 164(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ subl (%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 4(%esi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 8(%esi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%esi), %edx
+ sbbl 16(%esi), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ sbbl 20(%esi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 120(%esp), %ebx # 4-byte Reload
+ sbbl 24(%esi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 28(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 32(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 36(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ sbbl 40(%esi), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ sbbl 44(%esi), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ sbbl 48(%esi), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 52(%esi), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ movl %eax, %ebp
+ sbbl 56(%esi), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 60(%esi), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ movl %eax, %ebx
+ sbbl 64(%esi), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl %ebx, %esi
+ sarl $31, %esi
+ testl %esi, %esi
+ movl 84(%esp), %esi # 4-byte Reload
+ js .LBB265_2
+# BB#1:
+ movl (%esp), %esi # 4-byte Reload
+.LBB265_2:
+ movl 152(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB265_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB265_4:
+ movl %eax, 4(%ebx)
+ movl 108(%esp), %eax # 4-byte Reload
+ movl 76(%esp), %esi # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ js .LBB265_6
+# BB#5:
+ movl 8(%esp), %edi # 4-byte Reload
+.LBB265_6:
+ movl %edi, 8(%ebx)
+ movl 116(%esp), %edi # 4-byte Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ js .LBB265_8
+# BB#7:
+ movl %edx, %ecx
+.LBB265_8:
+ movl %ecx, 12(%ebx)
+ movl 104(%esp), %ecx # 4-byte Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ js .LBB265_10
+# BB#9:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB265_10:
+ movl %edx, 16(%ebx)
+ movl %ebp, %edx
+ js .LBB265_12
+# BB#11:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB265_12:
+ movl %esi, 20(%ebx)
+ movl 112(%esp), %ebp # 4-byte Reload
+ js .LBB265_14
+# BB#13:
+ movl 20(%esp), %esi # 4-byte Reload
+ movl %esi, 120(%esp) # 4-byte Spill
+.LBB265_14:
+ movl 120(%esp), %esi # 4-byte Reload
+ movl %esi, 24(%ebx)
+ js .LBB265_16
+# BB#15:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB265_16:
+ movl %ecx, 28(%ebx)
+ js .LBB265_18
+# BB#17:
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 124(%esp) # 4-byte Spill
+.LBB265_18:
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%ebx)
+ js .LBB265_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB265_20:
+ movl %eax, 36(%ebx)
+ movl 100(%esp), %ecx # 4-byte Reload
+ js .LBB265_22
+# BB#21:
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, 128(%esp) # 4-byte Spill
+.LBB265_22:
+ movl 128(%esp), %eax # 4-byte Reload
+ movl %eax, 40(%ebx)
+ js .LBB265_24
+# BB#23:
+ movl 40(%esp), %ebp # 4-byte Reload
+.LBB265_24:
+ movl %ebp, 44(%ebx)
+ js .LBB265_26
+# BB#25:
+ movl 44(%esp), %edi # 4-byte Reload
+.LBB265_26:
+ movl %edi, 48(%ebx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB265_28
+# BB#27:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB265_28:
+ movl %eax, 52(%ebx)
+ js .LBB265_30
+# BB#29:
+ movl 52(%esp), %edx # 4-byte Reload
+.LBB265_30:
+ movl %edx, 56(%ebx)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB265_32
+# BB#31:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB265_32:
+ movl %eax, 60(%ebx)
+ js .LBB265_34
+# BB#33:
+ movl 60(%esp), %ecx # 4-byte Reload
+.LBB265_34:
+ movl %ecx, 64(%ebx)
+ addl $132, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end265:
+ .size mcl_fp_addNF17Lbmi2, .Lfunc_end265-mcl_fp_addNF17Lbmi2
+
+ .globl mcl_fp_sub17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_sub17Lbmi2,@function
+mcl_fp_sub17Lbmi2: # @mcl_fp_sub17Lbmi2
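+# Modular subtract: the raw 17-limb difference is stored to dst; if the
+# subtraction borrows, the modulus is added back in the %carry branch.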
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+ movl 88(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 92(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ sbbl 32(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ sbbl 40(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ sbbl 44(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 48(%esi), %edx
+ sbbl 48(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 52(%esi), %ecx
+ sbbl 52(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 56(%esi), %eax
+ sbbl 56(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 60(%esi), %ebp
+ sbbl 60(%edi), %ebp
+ movl 64(%esi), %esi
+ sbbl 64(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 84(%esp), %ebx
+ movl 56(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 60(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%ebx)
+ movl %edx, 48(%ebx)
+ movl %ecx, 52(%ebx)
+ movl %eax, 56(%ebx)
+ movl %ebp, 60(%ebx)
+ movl %esi, 64(%ebx)
+ je .LBB266_2
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 96(%esp), %esi
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 8(%esi), %edi
+ movl 12(%esi), %eax
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl 44(%esi), %eax
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%ebx)
+ movl 48(%esi), %ecx
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%ebx)
+ movl 52(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 48(%ebx)
+ movl 56(%esi), %ecx
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 52(%ebx)
+ movl %ecx, 56(%ebx)
+ movl 60(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 60(%ebx)
+ movl 64(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ebx)
+.LBB266_2: # %nocarry
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end266:
+ .size mcl_fp_sub17Lbmi2, .Lfunc_end266-mcl_fp_sub17Lbmi2
+
+ .globl mcl_fp_subNF17Lbmi2
+ .align 16, 0x90
+ .type mcl_fp_subNF17Lbmi2,@function
+mcl_fp_subNF17Lbmi2: # @mcl_fp_subNF17Lbmi2
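+# "NF" modular subtract: a - b is computed, the sign of the top word is spread
+# into a mask, and (p AND mask) is added so negative results wrap back into range.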
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $116, %esp
+ movl 140(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 144(%esp), %edi
+ subl (%edi), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ sbbl 4(%edi), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 60(%ecx), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 56(%ecx), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 52(%ecx), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 40(%ecx), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 36(%edi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ sbbl 40(%edi), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ sbbl 52(%edi), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ sbbl 56(%edi), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ sbbl 60(%edi), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 64(%edi), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ sarl $31, %eax
+ movl %eax, %edx
+ shldl $1, %ecx, %edx
+ movl 148(%esp), %ebx
+ movl 28(%ebx), %ecx
+ andl %edx, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 12(%ebx), %ecx
+ andl %edx, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 4(%ebx), %ecx
+ andl %edx, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ andl (%ebx), %edx
+ movl 64(%ebx), %edi
+ movl %eax, %ecx
+ andl %ecx, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ rorxl $31, %ecx, %eax
+ andl 60(%ebx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 56(%ebx), %ecx
+ andl %eax, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%ebx), %ecx
+ andl %eax, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 48(%ebx), %ecx
+ andl %eax, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%ebx), %ecx
+ andl %eax, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 40(%ebx), %ecx
+ andl %eax, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 36(%ebx), %ecx
+ andl %eax, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 32(%ebx), %ecx
+ andl %eax, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 24(%ebx), %ebp
+ andl %eax, %ebp
+ movl 20(%ebx), %edi
+ andl %eax, %edi
+ movl 16(%ebx), %esi
+ andl %eax, %esi
+ andl 8(%ebx), %eax
+ addl 72(%esp), %edx # 4-byte Folded Reload
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl 136(%esp), %ebx
+ movl %edx, (%ebx)
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 4(%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 8(%ebx)
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 12(%ebx)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 16(%ebx)
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 20(%ebx)
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 24(%ebx)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 112(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%ebx)
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%ebx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 48(%ebx)
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 52(%ebx)
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 56(%ebx)
+ movl %eax, 60(%ebx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ebx)
+ addl $116, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end267:
+ .size mcl_fp_subNF17Lbmi2, .Lfunc_end267-mcl_fp_subNF17Lbmi2
+
+ .globl mcl_fpDbl_add17Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_add17Lbmi2,@function
+mcl_fpDbl_add17Lbmi2: # @mcl_fpDbl_add17Lbmi2
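+# Double-width add over 34 limbs: the low 17 limbs of the sum are stored
+# directly; the high 17 limbs are conditionally reduced by the modulus.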
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $128, %esp
+ movl 156(%esp), %ecx
+ movl 152(%esp), %edx
+ movl 12(%edx), %edi
+ movl 16(%edx), %esi
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%edx), %ebp
+ movl 148(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%edx), %ebp
+ adcl 8(%edx), %ebx
+ adcl 12(%ecx), %edi
+ adcl 16(%ecx), %esi
+ movl %ebp, 4(%eax)
+ movl 76(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %edi, 12(%eax)
+ movl 20(%edx), %edi
+ adcl %ebx, %edi
+ movl 24(%ecx), %ebx
+ movl %esi, 16(%eax)
+ movl 24(%edx), %esi
+ adcl %ebx, %esi
+ movl 28(%ecx), %ebx
+ movl %edi, 20(%eax)
+ movl 28(%edx), %edi
+ adcl %ebx, %edi
+ movl 32(%ecx), %ebx
+ movl %esi, 24(%eax)
+ movl 32(%edx), %esi
+ adcl %ebx, %esi
+ movl 36(%ecx), %ebx
+ movl %edi, 28(%eax)
+ movl 36(%edx), %edi
+ adcl %ebx, %edi
+ movl 40(%ecx), %ebx
+ movl %esi, 32(%eax)
+ movl 40(%edx), %esi
+ adcl %ebx, %esi
+ movl 44(%ecx), %ebx
+ movl %edi, 36(%eax)
+ movl 44(%edx), %edi
+ adcl %ebx, %edi
+ movl 48(%ecx), %ebx
+ movl %esi, 40(%eax)
+ movl 48(%edx), %esi
+ adcl %ebx, %esi
+ movl 52(%ecx), %ebx
+ movl %edi, 44(%eax)
+ movl 52(%edx), %edi
+ adcl %ebx, %edi
+ movl 56(%ecx), %ebx
+ movl %esi, 48(%eax)
+ movl 56(%edx), %esi
+ adcl %ebx, %esi
+ movl 60(%ecx), %ebx
+ movl %edi, 52(%eax)
+ movl 60(%edx), %edi
+ adcl %ebx, %edi
+ movl 64(%ecx), %ebx
+ movl %esi, 56(%eax)
+ movl 64(%edx), %esi
+ adcl %ebx, %esi
+ movl 68(%ecx), %ebx
+ movl %edi, 60(%eax)
+ movl 68(%edx), %edi
+ adcl %ebx, %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 72(%ecx), %edi
+ movl %esi, 64(%eax)
+ movl 72(%edx), %eax
+ adcl %edi, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 76(%edx), %eax
+ adcl %ebp, %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 80(%ecx), %esi
+ movl 80(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl 84(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 88(%ecx), %esi
+ movl 88(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%ecx), %esi
+ movl 92(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 96(%ecx), %esi
+ movl 96(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 100(%ecx), %esi
+ movl 100(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl 104(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%ecx), %esi
+ movl 108(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 112(%ecx), %esi
+ movl 112(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 116(%ecx), %esi
+ movl 116(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 120(%ecx), %edi
+ movl 120(%edx), %esi
+ adcl %edi, %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 124(%ecx), %ebx
+ movl 124(%edx), %edi
+ adcl %ebx, %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 128(%ecx), %ebx
+ movl 128(%edx), %ebp
+ adcl %ebx, %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 132(%ecx), %ecx
+ movl 132(%edx), %edx
+ adcl %ecx, %edx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 160(%esp), %ebx
+ movl 92(%esp), %eax # 4-byte Reload
+ subl (%ebx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ sbbl 4(%ebx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 8(%ebx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 12(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 16(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ sbbl 20(%ebx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ sbbl 24(%ebx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 28(%ebx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 32(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 36(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ sbbl 40(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ sbbl 44(%ebx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 48(%ebx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 52(%ebx), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ sbbl 56(%ebx), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ sbbl 60(%ebx), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ sbbl 64(%ebx), %ebp
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB268_2
+# BB#1:
+ movl %ebp, %edx
+.LBB268_2:
+ testb %cl, %cl
+ movl 92(%esp), %eax # 4-byte Reload
+ movl 88(%esp), %esi # 4-byte Reload
+ movl 84(%esp), %edi # 4-byte Reload
+ movl 80(%esp), %ebx # 4-byte Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ jne .LBB268_4
+# BB#3:
+ movl (%esp), %esi # 4-byte Reload
+ movl 4(%esp), %edi # 4-byte Reload
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB268_4:
+ movl 148(%esp), %ecx
+ movl %eax, 68(%ecx)
+ movl %ecx, %eax
+ movl 96(%esp), %ecx # 4-byte Reload
+ movl %ecx, 72(%eax)
+ movl 100(%esp), %ecx # 4-byte Reload
+ movl %ecx, 76(%eax)
+ movl 104(%esp), %ecx # 4-byte Reload
+ movl %ecx, 80(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl %ecx, 84(%eax)
+ movl 112(%esp), %ecx # 4-byte Reload
+ movl %ecx, 88(%eax)
+ movl 116(%esp), %ecx # 4-byte Reload
+ movl %ecx, 92(%eax)
+ movl 120(%esp), %ecx # 4-byte Reload
+ movl %ecx, 96(%eax)
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl %ecx, 100(%eax)
+ movl %ebp, 104(%eax)
+ movl %ebx, 108(%eax)
+ movl %edi, 112(%eax)
+ movl %esi, 116(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ jne .LBB268_6
+# BB#5:
+ movl 52(%esp), %esi # 4-byte Reload
+.LBB268_6:
+ movl %esi, 120(%eax)
+ movl 68(%esp), %esi # 4-byte Reload
+ jne .LBB268_8
+# BB#7:
+ movl 56(%esp), %esi # 4-byte Reload
+.LBB268_8:
+ movl %esi, 124(%eax)
+ jne .LBB268_10
+# BB#9:
+ movl 60(%esp), %ecx # 4-byte Reload
+.LBB268_10:
+ movl %ecx, 128(%eax)
+ movl %edx, 132(%eax)
+ addl $128, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end268:
+ .size mcl_fpDbl_add17Lbmi2, .Lfunc_end268-mcl_fpDbl_add17Lbmi2
+
+ .globl mcl_fpDbl_sub17Lbmi2
+ .align 16, 0x90
+ .type mcl_fpDbl_sub17Lbmi2,@function
+mcl_fpDbl_sub17Lbmi2: # @mcl_fpDbl_sub17Lbmi2
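+# Double-width subtract over 34 limbs: the low 17 limbs are stored directly;
+# the modulus (or zero, chosen by the borrow flag) is added back into the high half.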
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $116, %esp
+ movl 140(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %edi
+ movl 144(%esp), %esi
+ subl (%esi), %eax
+ sbbl 4(%esi), %edi
+ movl 8(%edx), %ebx
+ sbbl 8(%esi), %ebx
+ movl 136(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edx), %eax
+ sbbl 12(%esi), %eax
+ movl %edi, 4(%ecx)
+ movl 16(%edx), %edi
+ sbbl 16(%esi), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%esi), %ebx
+ movl %eax, 12(%ecx)
+ movl 20(%edx), %eax
+ sbbl %ebx, %eax
+ movl 24(%esi), %ebx
+ movl %edi, 16(%ecx)
+ movl 24(%edx), %edi
+ sbbl %ebx, %edi
+ movl 28(%esi), %ebx
+ movl %eax, 20(%ecx)
+ movl 28(%edx), %eax
+ sbbl %ebx, %eax
+ movl 32(%esi), %ebx
+ movl %edi, 24(%ecx)
+ movl 32(%edx), %edi
+ sbbl %ebx, %edi
+ movl 36(%esi), %ebx
+ movl %eax, 28(%ecx)
+ movl 36(%edx), %eax
+ sbbl %ebx, %eax
+ movl 40(%esi), %ebx
+ movl %edi, 32(%ecx)
+ movl 40(%edx), %edi
+ sbbl %ebx, %edi
+ movl 44(%esi), %ebx
+ movl %eax, 36(%ecx)
+ movl 44(%edx), %eax
+ sbbl %ebx, %eax
+ movl 48(%esi), %ebx
+ movl %edi, 40(%ecx)
+ movl 48(%edx), %edi
+ sbbl %ebx, %edi
+ movl 52(%esi), %ebx
+ movl %eax, 44(%ecx)
+ movl 52(%edx), %eax
+ sbbl %ebx, %eax
+ movl 56(%esi), %ebx
+ movl %edi, 48(%ecx)
+ movl 56(%edx), %edi
+ sbbl %ebx, %edi
+ movl 60(%esi), %ebx
+ movl %eax, 52(%ecx)
+ movl 60(%edx), %eax
+ sbbl %ebx, %eax
+ movl 64(%esi), %ebx
+ movl %edi, 56(%ecx)
+ movl 64(%edx), %edi
+ sbbl %ebx, %edi
+ movl 68(%esi), %ebx
+ movl %eax, 60(%ecx)
+ movl 68(%edx), %eax
+ sbbl %ebx, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 72(%esi), %eax
+ movl %edi, 64(%ecx)
+ movl 72(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 76(%esi), %eax
+ movl 76(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 80(%esi), %eax
+ movl 80(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 84(%esi), %eax
+ movl 84(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 88(%esi), %eax
+ movl 88(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 92(%esi), %eax
+ movl 92(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 96(%esi), %eax
+ movl 96(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 100(%esi), %eax
+ movl 100(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 104(%esi), %eax
+ movl 104(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 108(%esi), %eax
+ movl 108(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 112(%esi), %eax
+ movl 112(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 116(%esi), %eax
+ movl 116(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 120(%esi), %eax
+ movl 120(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 124(%esi), %eax
+ movl 124(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 128(%esi), %eax
+ movl 128(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 132(%esi), %eax
+ movl 132(%edx), %edx
+ sbbl %eax, %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 148(%esp), %ebp
+ jne .LBB269_1
+# BB#2:
+ movl $0, 76(%esp) # 4-byte Folded Spill
+ jmp .LBB269_3
+.LBB269_1:
+ movl 64(%ebp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+.LBB269_3:
+ testb %al, %al
+ jne .LBB269_4
+# BB#5:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ movl $0, %esi
+ jmp .LBB269_6
+.LBB269_4:
+ movl (%ebp), %esi
+ movl 4(%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+.LBB269_6:
+ jne .LBB269_7
+# BB#8:
+ movl $0, 40(%esp) # 4-byte Folded Spill
+ jmp .LBB269_9
+.LBB269_7:
+ movl 60(%ebp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+.LBB269_9:
+ jne .LBB269_10
+# BB#11:
+ movl $0, 36(%esp) # 4-byte Folded Spill
+ jmp .LBB269_12
+.LBB269_10:
+ movl 56(%ebp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+.LBB269_12:
+ jne .LBB269_13
+# BB#14:
+ movl $0, 32(%esp) # 4-byte Folded Spill
+ jmp .LBB269_15
+.LBB269_13:
+ movl 52(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+.LBB269_15:
+ jne .LBB269_16
+# BB#17:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ jmp .LBB269_18
+.LBB269_16:
+ movl 48(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB269_18:
+ jne .LBB269_19
+# BB#20:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB269_21
+.LBB269_19:
+ movl 44(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB269_21:
+ jne .LBB269_22
+# BB#23:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB269_24
+.LBB269_22:
+ movl 40(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB269_24:
+ jne .LBB269_25
+# BB#26:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB269_27
+.LBB269_25:
+ movl 36(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB269_27:
+ jne .LBB269_28
+# BB#29:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB269_30
+.LBB269_28:
+ movl 32(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB269_30:
+ jne .LBB269_31
+# BB#32:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB269_33
+.LBB269_31:
+ movl 28(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB269_33:
+ jne .LBB269_34
+# BB#35:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB269_36
+.LBB269_34:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB269_36:
+ jne .LBB269_37
+# BB#38:
+ movl $0, %ebx
+ jmp .LBB269_39
+.LBB269_37:
+ movl 20(%ebp), %ebx
+.LBB269_39:
+ jne .LBB269_40
+# BB#41:
+ movl $0, %edi
+ jmp .LBB269_42
+.LBB269_40:
+ movl 16(%ebp), %edi
+.LBB269_42:
+ jne .LBB269_43
+# BB#44:
+ movl %ebp, %eax
+ movl $0, %ebp
+ jmp .LBB269_45
+.LBB269_43:
+ movl %ebp, %eax
+ movl 12(%eax), %ebp
+.LBB269_45:
+ jne .LBB269_46
+# BB#47:
+ xorl %eax, %eax
+ jmp .LBB269_48
+.LBB269_46:
+ movl 8(%eax), %eax
+.LBB269_48:
+ addl 52(%esp), %esi # 4-byte Folded Reload
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 68(%ecx)
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 72(%ecx)
+ adcl 56(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 76(%ecx)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %ebp, 80(%ecx)
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 84(%ecx)
+ movl (%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 88(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl 8(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 96(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 100(%ecx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 104(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 108(%ecx)
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 112(%ecx)
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 116(%ecx)
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 120(%ecx)
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 124(%ecx)
+ movl %eax, 128(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 132(%ecx)
+ addl $116, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end269:
+ .size mcl_fpDbl_sub17Lbmi2, .Lfunc_end269-mcl_fpDbl_sub17Lbmi2
+
+
+ .section ".note.GNU-stack","",@progbits
diff --git a/vendor/github.com/tangerine-network/mcl/src/asm/x86.s b/vendor/github.com/tangerine-network/mcl/src/asm/x86.s
new file mode 100644
index 000000000..cdd988ad3
--- /dev/null
+++ b/vendor/github.com/tangerine-network/mcl/src/asm/x86.s
@@ -0,0 +1,73785 @@
+ .text
+ .file "<stdin>"
+ .globl makeNIST_P192L
+ .align 16, 0x90
+ .type makeNIST_P192L,@function
+makeNIST_P192L: # @makeNIST_P192L
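+# Writes the NIST P-192 prime 2^192 - 2^64 - 1 as six 32-bit limbs into the
+# buffer passed as the hidden first argument (hence retl $4).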
+# BB#0:
+ movl 4(%esp), %eax
+ movl $-1, 20(%eax)
+ movl $-1, 16(%eax)
+ movl $-1, 12(%eax)
+ movl $-2, 8(%eax)
+ movl $-1, 4(%eax)
+ movl $-1, (%eax)
+ retl $4
+.Lfunc_end0:
+ .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L
+
+ .globl mcl_fpDbl_mod_NIST_P192L
+ .align 16, 0x90
+ .type mcl_fpDbl_mod_NIST_P192L,@function
+mcl_fpDbl_mod_NIST_P192L: # @mcl_fpDbl_mod_NIST_P192L
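+# Reduces a 12-limb (384-bit) input modulo P-192, folding the high words back
+# with 2^192 == 2^64 + 1 (mod p) and a final conditional subtract.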
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %eax
+ movl 32(%eax), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ xorl %edx, %edx
+ movl (%eax), %ebx
+ addl %ecx, %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 4(%eax), %ecx
+ adcl %edi, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 8(%eax), %ebp
+ adcl %esi, %ebp
+ movl 36(%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 12(%eax), %esi
+ adcl %ecx, %esi
+ movl 40(%eax), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 16(%eax), %ecx
+ adcl %ebx, %ecx
+ movl 44(%eax), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl 20(%eax), %eax
+ adcl %edi, %eax
+ adcl $0, %edx
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl %ebx, 24(%esp) # 4-byte Folded Spill
+ movl (%esp), %ebx # 4-byte Reload
+ adcl %ebx, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %edx
+ adcl $0, %edi
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ adcl $0, %eax
+ adcl $0, %edx
+ adcl $0, %edi
+ addl %edx, 24(%esp) # 4-byte Folded Spill
+ adcl %edi, 28(%esp) # 4-byte Folded Spill
+ adcl %ebp, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ adcl $0, %ecx
+ adcl $0, %eax
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 24(%esp), %esi # 4-byte Reload
+ addl $1, %esi
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $1, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %edi, %edx
+ adcl $0, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ adcl $0, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %edx
+ adcl $0, %edx
+ adcl $-1, %ebx
+ andl $1, %ebx
+ jne .LBB1_2
+# BB#1:
+ movl %edx, %eax
+.LBB1_2:
+ testb %bl, %bl
+ movl 24(%esp), %edx # 4-byte Reload
+ jne .LBB1_4
+# BB#3:
+ movl %esi, %edx
+.LBB1_4:
+ movl 52(%esp), %esi
+ movl %edx, (%esi)
+ movl 20(%esp), %edx # 4-byte Reload
+ movl 28(%esp), %ebx # 4-byte Reload
+ jne .LBB1_6
+# BB#5:
+ movl %ebp, %ebx
+.LBB1_6:
+ movl %ebx, 4(%esi)
+ jne .LBB1_8
+# BB#7:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB1_8:
+ movl %edx, 8(%esi)
+ jne .LBB1_10
+# BB#9:
+ movl 12(%esp), %edi # 4-byte Reload
+.LBB1_10:
+ movl %edi, 12(%esi)
+ jne .LBB1_12
+# BB#11:
+ movl 16(%esp), %ecx # 4-byte Reload
+.LBB1_12:
+ movl %ecx, 16(%esi)
+ movl %eax, 20(%esi)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end1:
+ .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L
+
+ .globl mcl_fp_sqr_NIST_P192L
+ .align 16, 0x90
+ .type mcl_fp_sqr_NIST_P192L,@function
+mcl_fp_sqr_NIST_P192L: # @mcl_fp_sqr_NIST_P192L
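+# Squares a 6-limb input via mcl_fpDbl_sqrPre6L, then reduces the 384-bit
+# product modulo P-192 with the same folding as the routine above.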
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+ calll .L2$pb
+.L2$pb:
+ popl %ebx
+.Ltmp0:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L2$pb), %ebx
+ movl 116(%esp), %eax
+ movl %eax, 4(%esp)
+ leal 44(%esp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_sqrPre6L@PLT
+ xorl %edi, %edi
+ movl 76(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 72(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %esi
+ addl %eax, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax
+ adcl %edx, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp
+ adcl %ecx, %ebp
+ movl 80(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %esi
+ adcl %eax, %esi
+ movl 84(%esp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx
+ adcl %ebx, %ecx
+ movl 88(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %edx
+ adcl %eax, %edx
+ adcl $0, %edi
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %ebx, 36(%esp) # 4-byte Folded Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 40(%esp) # 4-byte Folded Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %edi
+ adcl $0, %eax
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ adcl $0, %edx
+ adcl $0, %edi
+ adcl $0, %eax
+ addl %edi, 36(%esp) # 4-byte Folded Spill
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl %ebp, %edi
+ adcl %esi, %eax
+ adcl $0, %ecx
+ adcl $0, %edx
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 36(%esp), %esi # 4-byte Reload
+ addl $1, %esi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ adcl $1, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl $0, %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ adcl $0, %ebp
+ adcl $-1, %ebx
+ andl $1, %ebx
+ jne .LBB2_2
+# BB#1:
+ movl %ebp, %edx
+.LBB2_2:
+ testb %bl, %bl
+ movl 36(%esp), %ebx # 4-byte Reload
+ jne .LBB2_4
+# BB#3:
+ movl %esi, %ebx
+.LBB2_4:
+ movl 112(%esp), %esi
+ movl %ebx, (%esi)
+ movl 40(%esp), %ebx # 4-byte Reload
+ jne .LBB2_6
+# BB#5:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB2_6:
+ movl %ebx, 4(%esi)
+ jne .LBB2_8
+# BB#7:
+ movl 24(%esp), %edi # 4-byte Reload
+.LBB2_8:
+ movl %edi, 8(%esi)
+ jne .LBB2_10
+# BB#9:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB2_10:
+ movl %eax, 12(%esi)
+ jne .LBB2_12
+# BB#11:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB2_12:
+ movl %ecx, 16(%esi)
+ movl %edx, 20(%esi)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end2:
+ .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L
+
+ .globl mcl_fp_mulNIST_P192L
+ .align 16, 0x90
+ .type mcl_fp_mulNIST_P192L,@function
+mcl_fp_mulNIST_P192L: # @mcl_fp_mulNIST_P192L
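+# Multiplies two 6-limb inputs via mcl_fpDbl_mulPre6L, then reduces the
+# 384-bit product modulo P-192.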
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+ calll .L3$pb
+.L3$pb:
+ popl %ebx
+.Ltmp1:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.L3$pb), %ebx
+ movl 120(%esp), %eax
+ movl %eax, 8(%esp)
+ movl 116(%esp), %eax
+ movl %eax, 4(%esp)
+ leal 44(%esp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6L@PLT
+ xorl %edi, %edi
+ movl 76(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 72(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %esi
+ addl %eax, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax
+ adcl %edx, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp
+ adcl %ecx, %ebp
+ movl 80(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %esi
+ adcl %eax, %esi
+ movl 84(%esp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx
+ adcl %ebx, %ecx
+ movl 88(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %edx
+ adcl %eax, %edx
+ adcl $0, %edi
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %ebx, 36(%esp) # 4-byte Folded Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 40(%esp) # 4-byte Folded Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %edi
+ adcl $0, %eax
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ adcl $0, %edx
+ adcl $0, %edi
+ adcl $0, %eax
+ addl %edi, 36(%esp) # 4-byte Folded Spill
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl %ebp, %edi
+ adcl %esi, %eax
+ adcl $0, %ecx
+ adcl $0, %edx
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 36(%esp), %esi # 4-byte Reload
+ addl $1, %esi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ adcl $1, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl $0, %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ adcl $0, %ebp
+ adcl $-1, %ebx
+ andl $1, %ebx
+ jne .LBB3_2
+# BB#1:
+ movl %ebp, %edx
+.LBB3_2:
+ testb %bl, %bl
+ movl 36(%esp), %ebx # 4-byte Reload
+ jne .LBB3_4
+# BB#3:
+ movl %esi, %ebx
+.LBB3_4:
+ movl 112(%esp), %esi
+ movl %ebx, (%esi)
+ movl 40(%esp), %ebx # 4-byte Reload
+ jne .LBB3_6
+# BB#5:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB3_6:
+ movl %ebx, 4(%esi)
+ jne .LBB3_8
+# BB#7:
+ movl 24(%esp), %edi # 4-byte Reload
+.LBB3_8:
+ movl %edi, 8(%esi)
+ jne .LBB3_10
+# BB#9:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB3_10:
+ movl %eax, 12(%esi)
+ jne .LBB3_12
+# BB#11:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB3_12:
+ movl %ecx, 16(%esi)
+ movl %edx, 20(%esi)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end3:
+ .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L
+
+ .globl mcl_fpDbl_mod_NIST_P521L
+ .align 16, 0x90
+ .type mcl_fpDbl_mod_NIST_P521L,@function
+mcl_fpDbl_mod_NIST_P521L: # @mcl_fpDbl_mod_NIST_P521L
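+# Reduces a double-width input modulo P-521 = 2^521 - 1: the top 521 bits are
+# shifted down (shldl $23 across limb pairs), added to the low 521 bits, the
+# wrap-around carry is folded in, and a result equal to p is normalized to zero.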
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %ecx
+ movl 124(%ecx), %edx
+ movl 128(%ecx), %esi
+ movl %esi, %eax
+ shldl $23, %edx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 120(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 116(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 112(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 108(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 104(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 100(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 92(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 84(%ecx), %edi
+ shldl $23, %edi, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 80(%ecx), %edx
+ shldl $23, %edx, %edi
+ movl 76(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl 72(%ecx), %ebx
+ shldl $23, %ebx, %eax
+ movl 68(%ecx), %ebp
+ shldl $23, %ebp, %ebx
+ shrl $9, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 64(%ecx), %esi
+ shldl $23, %esi, %ebp
+ andl $511, %esi # imm = 0x1FF
+ addl (%ecx), %ebp
+ adcl 4(%ecx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ adcl 8(%ecx), %eax
+ adcl 12(%ecx), %edx
+ adcl 16(%ecx), %edi
+ movl 28(%esp), %ebx # 4-byte Reload
+ adcl 20(%ecx), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 24(%ecx), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ adcl 28(%ecx), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl 32(%ecx), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 36(%ecx), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ebx # 4-byte Reload
+ adcl 40(%ecx), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 24(%esp), %ebx # 4-byte Reload
+ adcl 44(%ecx), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 48(%ecx), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 20(%esp), %ebx # 4-byte Reload
+ adcl 52(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 56(%ecx), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl 60(%ecx), %ebx
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ shrl $9, %ecx
+ andl $1, %ecx
+ addl %ebp, %ecx
+ adcl $0, 16(%esp) # 4-byte Folded Spill
+ adcl $0, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ebx, %ebp
+ adcl $0, %ebp
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ movl %ecx, %edi
+ andl %eax, %edi
+ andl %edx, %edi
+ andl %esi, %edi
+ andl 28(%esp), %edi # 4-byte Folded Reload
+ andl 32(%esp), %edi # 4-byte Folded Reload
+ andl 36(%esp), %edi # 4-byte Folded Reload
+ andl 40(%esp), %edi # 4-byte Folded Reload
+ andl 44(%esp), %edi # 4-byte Folded Reload
+ andl 48(%esp), %edi # 4-byte Folded Reload
+ andl 24(%esp), %edi # 4-byte Folded Reload
+ andl 52(%esp), %edi # 4-byte Folded Reload
+ movl 20(%esp), %esi # 4-byte Reload
+ andl %esi, %edi
+ andl 56(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, %edx
+ movl 16(%esp), %ebx # 4-byte Reload
+ andl %ebp, %edi
+ movl %ebp, %eax
+ movl %edx, %ebp
+ orl $-512, %ebp # imm = 0xFFFFFE00
+ andl %edi, %ebp
+ andl %ebx, %ebp
+ cmpl $-1, %ebp
+ movl 80(%esp), %edi
+ je .LBB4_1
+# BB#3: # %nonzero
+ movl %ecx, (%edi)
+ movl %ebx, 4(%edi)
+ movl (%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%edi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%edi)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%edi)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%edi)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%edi)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%edi)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%edi)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%edi)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%edi)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%edi)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%edi)
+ movl %esi, 52(%edi)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%edi)
+ movl %eax, 60(%edi)
+ andl $511, %edx # imm = 0x1FF
+ movl %edx, 64(%edi)
+ jmp .LBB4_2
+.LBB4_1: # %zero
+ xorl %eax, %eax
+ movl $17, %ecx
+ rep;stosl
+.LBB4_2: # %zero
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end4:
+ .size mcl_fpDbl_mod_NIST_P521L, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521L
+
+ .globl mcl_fp_mulUnitPre1L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre1L,@function
+mcl_fp_mulUnitPre1L: # @mcl_fp_mulUnitPre1L
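+# Multiply the single-limb operand by a 32-bit scalar and store the 2-limb product.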
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %eax
+ mull 12(%esp)
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edx, 4(%ecx)
+ retl
+.Lfunc_end5:
+ .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L
+
+ .globl mcl_fpDbl_mulPre1L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre1L,@function
+mcl_fpDbl_mulPre1L: # @mcl_fpDbl_mulPre1L
+# BB#0:
+ movl 12(%esp), %eax
+ movl (%eax), %eax
+ movl 8(%esp), %ecx
+ mull (%ecx)
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edx, 4(%ecx)
+ retl
+.Lfunc_end6:
+ .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L
+
+ .globl mcl_fpDbl_sqrPre1L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre1L,@function
+mcl_fpDbl_sqrPre1L: # @mcl_fpDbl_sqrPre1L
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %eax
+ mull %eax
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edx, 4(%ecx)
+ retl
+.Lfunc_end7:
+ .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L
+
+ .globl mcl_fp_mont1L
+ .align 16, 0x90
+ .type mcl_fp_mont1L,@function
+mcl_fp_mont1L: # @mcl_fp_mont1L
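+# One-limb Montgomery multiplication: t = x[0]*y[0]; q = t * p[-1] (the word kept
+# just below the modulus, presumably -p^{-1} mod 2^32); the result is
+# (t + q*p[0]) >> 32, followed by a conditional subtraction of p[0].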
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %eax
+ movl 20(%esp), %ecx
+ mull (%ecx)
+ movl %eax, %ecx
+ movl %edx, %esi
+ movl 24(%esp), %edx
+ movl -4(%edx), %eax
+ imull %ecx, %eax
+ movl (%edx), %edi
+ mull %edi
+ addl %ecx, %eax
+ adcl %esi, %edx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl %edx, %eax
+ subl %edi, %eax
+ sbbl $0, %ecx
+ testb $1, %cl
+ jne .LBB8_2
+# BB#1:
+ movl %eax, %edx
+.LBB8_2:
+ movl 12(%esp), %eax
+ movl %edx, (%eax)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end8:
+ .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L
+
+ .globl mcl_fp_montNF1L
+ .align 16, 0x90
+ .type mcl_fp_montNF1L,@function
+mcl_fp_montNF1L: # @mcl_fp_montNF1L
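+# "NF" variant of mcl_fp_mont1L: the reduced value is selected by the sign of the
+# tentative subtraction (js) rather than by an explicit borrow chain.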
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %eax
+ movl 20(%esp), %ecx
+ mull (%ecx)
+ movl %eax, %ecx
+ movl %edx, %esi
+ movl 24(%esp), %edx
+ movl -4(%edx), %eax
+ imull %ecx, %eax
+ movl (%edx), %edi
+ mull %edi
+ addl %ecx, %eax
+ adcl %esi, %edx
+ movl %edx, %eax
+ subl %edi, %eax
+ js .LBB9_2
+# BB#1:
+ movl %eax, %edx
+.LBB9_2:
+ movl 12(%esp), %eax
+ movl %edx, (%eax)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end9:
+ .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L
+
+ .globl mcl_fp_montRed1L
+ .align 16, 0x90
+ .type mcl_fp_montRed1L,@function
+mcl_fp_montRed1L: # @mcl_fp_montRed1L
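+# Montgomery reduction of a 2-limb input down to one limb, with a final
+# conditional subtraction of the modulus.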
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %esi
+ movl 20(%esp), %edx
+ movl -4(%edx), %eax
+ imull %esi, %eax
+ movl (%edx), %edi
+ mull %edi
+ addl %esi, %eax
+ adcl 4(%ecx), %edx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl %edx, %eax
+ subl %edi, %eax
+ sbbl $0, %ecx
+ testb $1, %cl
+ jne .LBB10_2
+# BB#1:
+ movl %eax, %edx
+.LBB10_2:
+ movl 12(%esp), %eax
+ movl %edx, (%eax)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end10:
+ .size mcl_fp_montRed1L, .Lfunc_end10-mcl_fp_montRed1L
+
+ .globl mcl_fp_addPre1L
+ .align 16, 0x90
+ .type mcl_fp_addPre1L,@function
+mcl_fp_addPre1L: # @mcl_fp_addPre1L
+# BB#0:
+ movl 12(%esp), %eax
+ movl (%eax), %eax
+ movl 4(%esp), %ecx
+ movl 8(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, (%ecx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ retl
+.Lfunc_end11:
+ .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L
+
+ .globl mcl_fp_subPre1L
+ .align 16, 0x90
+ .type mcl_fp_subPre1L,@function
+mcl_fp_subPre1L: # @mcl_fp_subPre1L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ xorl %eax, %eax
+ movl 8(%esp), %edx
+ movl 16(%esp), %esi
+ subl (%esi), %ecx
+ movl %ecx, (%edx)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end12:
+ .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L
+
+ .globl mcl_fp_shr1_1L
+ .align 16, 0x90
+ .type mcl_fp_shr1_1L,@function
+mcl_fp_shr1_1L: # @mcl_fp_shr1_1L
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %eax
+ shrl %eax
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ retl
+.Lfunc_end13:
+ .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L
+
+ .globl mcl_fp_add1L
+ .align 16, 0x90
+ .type mcl_fp_add1L,@function
+mcl_fp_add1L: # @mcl_fp_add1L
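+# Modular addition: store x+y, tentatively subtract p, and overwrite with the
+# reduced value only when the subtraction did not borrow.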
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %eax
+ movl 8(%esp), %ecx
+ movl 12(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, (%ecx)
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 20(%esp), %esi
+ subl (%esi), %eax
+ sbbl $0, %edx
+ testb $1, %dl
+ jne .LBB14_2
+# BB#1: # %nocarry
+ movl %eax, (%ecx)
+.LBB14_2: # %carry
+ popl %esi
+ retl
+.Lfunc_end14:
+ .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L
+
+ .globl mcl_fp_addNF1L
+ .align 16, 0x90
+ .type mcl_fp_addNF1L,@function
+mcl_fp_addNF1L: # @mcl_fp_addNF1L
+# BB#0:
+ movl 12(%esp), %eax
+ movl (%eax), %eax
+ movl 8(%esp), %ecx
+ addl (%ecx), %eax
+ movl 16(%esp), %edx
+ movl %eax, %ecx
+ subl (%edx), %ecx
+ js .LBB15_2
+# BB#1:
+ movl %ecx, %eax
+.LBB15_2:
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ retl
+.Lfunc_end15:
+ .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L
+
+ .globl mcl_fp_sub1L
+ .align 16, 0x90
+ .type mcl_fp_sub1L,@function
+mcl_fp_sub1L: # @mcl_fp_sub1L
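+# Modular subtraction: compute x-y and, if the subtraction borrowed, add the
+# modulus back before returning.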
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %eax
+ xorl %edx, %edx
+ movl 8(%esp), %ecx
+ movl 16(%esp), %esi
+ subl (%esi), %eax
+ movl %eax, (%ecx)
+ sbbl $0, %edx
+ testb $1, %dl
+ jne .LBB16_2
+# BB#1: # %nocarry
+ popl %esi
+ retl
+.LBB16_2: # %carry
+ movl 20(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, (%ecx)
+ popl %esi
+ retl
+.Lfunc_end16:
+ .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L
+
+ .globl mcl_fp_subNF1L
+ .align 16, 0x90
+ .type mcl_fp_subNF1L,@function
+mcl_fp_subNF1L: # @mcl_fp_subNF1L
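+# Branch-free modular subtraction: the sign of x-y is broadcast into a mask that
+# selects either 0 or p to add back.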
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %eax
+ movl 12(%esp), %ecx
+ subl (%ecx), %eax
+ movl %eax, %ecx
+ sarl $31, %ecx
+ movl 16(%esp), %edx
+ andl (%edx), %ecx
+ addl %eax, %ecx
+ movl 4(%esp), %eax
+ movl %ecx, (%eax)
+ retl
+.Lfunc_end17:
+ .size mcl_fp_subNF1L, .Lfunc_end17-mcl_fp_subNF1L
+
+ .globl mcl_fpDbl_add1L
+ .align 16, 0x90
+ .type mcl_fpDbl_add1L,@function
+mcl_fpDbl_add1L: # @mcl_fpDbl_add1L
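+# Double-width (2-limb) addition; only the upper limb is conditionally reduced by
+# the single-limb modulus.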
+# BB#0:
+ pushl %ebx
+ pushl %esi
+ movl 20(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %eax
+ movl 16(%esp), %esi
+ addl (%esi), %edx
+ movl 12(%esp), %ecx
+ adcl 4(%esi), %eax
+ movl %edx, (%ecx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 24(%esp), %esi
+ movl %eax, %edx
+ subl (%esi), %edx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB18_2
+# BB#1:
+ movl %edx, %eax
+.LBB18_2:
+ movl %eax, 4(%ecx)
+ popl %esi
+ popl %ebx
+ retl
+.Lfunc_end18:
+ .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L
+
+ .globl mcl_fpDbl_sub1L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub1L,@function
+mcl_fpDbl_sub1L: # @mcl_fpDbl_sub1L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %eax
+ xorl %ecx, %ecx
+ movl 16(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %eax
+ movl 8(%esp), %edx
+ movl %esi, (%edx)
+ sbbl $0, %ecx
+ andl $1, %ecx
+ je .LBB19_2
+# BB#1:
+ movl 20(%esp), %ecx
+ movl (%ecx), %ecx
+.LBB19_2:
+ addl %eax, %ecx
+ movl %ecx, 4(%edx)
+ popl %esi
+ retl
+.Lfunc_end19:
+ .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L
+
+ .globl mcl_fp_mulUnitPre2L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre2L,@function
+mcl_fp_mulUnitPre2L: # @mcl_fp_mulUnitPre2L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl 20(%esp), %ebx
+ movl %ecx, %eax
+ mull 4(%ebx)
+ movl %edx, %esi
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull (%ebx)
+ movl 16(%esp), %ecx
+ movl %eax, (%ecx)
+ addl %edi, %edx
+ movl %edx, 4(%ecx)
+ adcl $0, %esi
+ movl %esi, 8(%ecx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end20:
+ .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L
+
+ .globl mcl_fpDbl_mulPre2L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre2L,@function
+mcl_fpDbl_mulPre2L: # @mcl_fpDbl_mulPre2L
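+# Schoolbook 2x2-limb multiplication producing the full 4-limb product.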
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $8, %esp
+ movl 32(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edi
+ movl 36(%esp), %ebx
+ movl (%ebx), %esi
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, %ebp
+ movl 28(%esp), %edx
+ movl %eax, (%edx)
+ movl 4(%ebx), %ebx
+ movl %edi, %eax
+ mull %ebx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, %ecx
+ movl %eax, %ebx
+ movl %edi, %eax
+ mull %esi
+ addl %ebp, %eax
+ adcl $0, %edx
+ addl %ebx, %eax
+ movl 28(%esp), %esi
+ movl %eax, 4(%esi)
+ adcl (%esp), %edx # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %ecx, %edx
+ movl %edx, 8(%esi)
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esi)
+ addl $8, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end21:
+ .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L
+
+ .globl mcl_fpDbl_sqrPre2L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre2L,@function
+mcl_fpDbl_sqrPre2L: # @mcl_fpDbl_sqrPre2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %esi
+ movl %esi, %eax
+ mull %esi
+ movl %edx, %edi
+ movl %eax, %ebx
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull %ecx
+ movl 20(%esp), %ecx
+ movl %eax, (%ecx)
+ addl %ebp, %edx
+ movl %esi, %eax
+ adcl $0, %eax
+ addl %ebp, %edx
+ movl %edx, 4(%ecx)
+ adcl %ebx, %eax
+ sbbl %edx, %edx
+ andl $1, %edx
+ addl %esi, %eax
+ movl %eax, 8(%ecx)
+ adcl %edi, %edx
+ movl %edx, 12(%ecx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end22:
+ .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L
+
+ .globl mcl_fp_mont2L
+ .align 16, 0x90
+ .type mcl_fp_mont2L,@function
+mcl_fp_mont2L: # @mcl_fp_mont2L
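+# Two-limb Montgomery multiplication, word-serial (CIOS-like): each multiplier
+# word is followed by one reduction step, then a final conditional subtraction of p.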
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %ecx
+ movl (%ecx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 4(%ecx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx
+ movl (%ecx), %esi
+ mull %esi
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 64(%esp), %edx
+ movl -4(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull %ecx, %ebp
+ movl (%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 4(%edx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %edx
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, %ebp
+ movl %eax, %edi
+ movl 16(%esp), %eax # 4-byte Reload
+ mull %esi
+ addl 4(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %edx
+ addl (%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %ebx
+ addl 8(%esp), %edi # 4-byte Folded Reload
+ adcl %eax, %ebp
+ adcl %edx, %ebx
+ movl 60(%esp), %eax
+ movl 4(%eax), %ecx
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 16(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 12(%esp) # 4-byte Folded Reload
+ movl %eax, %ecx
+ movl %edx, %esi
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %edi
+ addl %ebp, %ecx
+ adcl %ebx, %esi
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ sbbl %ebx, %ebx
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ andl $1, %ebx
+ mull 20(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ mull 24(%esp) # 4-byte Folded Reload
+ addl 16(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %edx
+ addl %ecx, %ebp
+ adcl %esi, %eax
+ adcl %edi, %edx
+ adcl $0, %ebx
+ movl %eax, %esi
+ subl 20(%esp), %esi # 4-byte Folded Reload
+ movl %edx, %ecx
+ sbbl 24(%esp), %ecx # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB23_2
+# BB#1:
+ movl %esi, %eax
+.LBB23_2:
+ movl 52(%esp), %esi
+ movl %eax, (%esi)
+ testb %bl, %bl
+ jne .LBB23_4
+# BB#3:
+ movl %ecx, %edx
+.LBB23_4:
+ movl %edx, 4(%esi)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end23:
+ .size mcl_fp_mont2L, .Lfunc_end23-mcl_fp_mont2L
+
+ .globl mcl_fp_montNF2L
+ .align 16, 0x90
+ .type mcl_fp_montNF2L,@function
+mcl_fp_montNF2L: # @mcl_fp_montNF2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 60(%esp), %ecx
+ movl (%ecx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 4(%ecx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx
+ movl (%ecx), %ebp
+ mull %ebp
+ movl %eax, %ebx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl -4(%eax), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ imull %ecx, %edi
+ movl (%eax), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ecx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, %edi
+ movl %eax, %ebp
+ addl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edi
+ addl %ebx, %esi
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl 64(%esp), %eax
+ movl 4(%eax), %ebx
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, %esi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 20(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ movl %edx, %ecx
+ addl 16(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %ebp, %ebx
+ adcl %edi, %ecx
+ adcl $0, %esi
+ movl 24(%esp), %eax # 4-byte Reload
+ imull %ebx, %eax
+ movl %eax, %edi
+ mull 32(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %edi, %eax
+ movl 28(%esp), %edi # 4-byte Reload
+ mull %edi
+ addl %ebx, %ebp
+ adcl %ecx, %eax
+ adcl $0, %esi
+ addl 24(%esp), %eax # 4-byte Folded Reload
+ adcl %edx, %esi
+ movl %eax, %edx
+ subl 32(%esp), %edx # 4-byte Folded Reload
+ movl %esi, %ecx
+ sbbl %edi, %ecx
+ testl %ecx, %ecx
+ js .LBB24_2
+# BB#1:
+ movl %edx, %eax
+.LBB24_2:
+ movl 56(%esp), %edx
+ movl %eax, (%edx)
+ js .LBB24_4
+# BB#3:
+ movl %ecx, %esi
+.LBB24_4:
+ movl %esi, 4(%edx)
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end24:
+ .size mcl_fp_montNF2L, .Lfunc_end24-mcl_fp_montNF2L
+
+ .globl mcl_fp_montRed2L
+ .align 16, 0x90
+ .type mcl_fp_montRed2L,@function
+mcl_fp_montRed2L: # @mcl_fp_montRed2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 44(%esp), %eax
+ movl -4(%eax), %ecx
+ movl (%eax), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 40(%esp), %edx
+ movl (%edx), %ebp
+ movl %ebp, %edi
+ imull %ecx, %edi
+ movl 4(%eax), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %edx
+ movl %edx, %esi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ebx
+ movl %edx, %edi
+ addl 4(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %ebp, %eax
+ movl 40(%esp), %edx
+ movl 12(%edx), %eax
+ adcl 4(%edx), %edi
+ adcl 8(%edx), %esi
+ adcl $0, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ imull %edi, %ecx
+ andl $1, %ebx
+ movl %ecx, %eax
+ mull 8(%esp) # 4-byte Folded Reload
+ movl %edx, (%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 12(%esp) # 4-byte Folded Reload
+ addl (%esp), %eax # 4-byte Folded Reload
+ adcl $0, %edx
+ addl %edi, %ebp
+ adcl %esi, %eax
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl %eax, %esi
+ subl 8(%esp), %esi # 4-byte Folded Reload
+ movl %edx, %ecx
+ sbbl 12(%esp), %ecx # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB25_2
+# BB#1:
+ movl %esi, %eax
+.LBB25_2:
+ movl 36(%esp), %esi
+ movl %eax, (%esi)
+ testb %bl, %bl
+ jne .LBB25_4
+# BB#3:
+ movl %ecx, %edx
+.LBB25_4:
+ movl %edx, 4(%esi)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end25:
+ .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L
+
+ .globl mcl_fp_addPre2L
+ .align 16, 0x90
+ .type mcl_fp_addPre2L,@function
+mcl_fp_addPre2L: # @mcl_fp_addPre2L
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ movl 12(%esp), %edx
+ addl (%edx), %ecx
+ movl 8(%esp), %esi
+ adcl 4(%edx), %eax
+ movl %ecx, (%esi)
+ movl %eax, 4(%esi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end26:
+ .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L
+
+ .globl mcl_fp_subPre2L
+ .align 16, 0x90
+ .type mcl_fp_subPre2L,@function
+mcl_fp_subPre2L: # @mcl_fp_subPre2L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ xorl %eax, %eax
+ movl 16(%esp), %esi
+ subl (%esi), %ecx
+ sbbl 4(%esi), %edx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl %edx, 4(%esi)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end27:
+ .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L
+
+ .globl mcl_fp_shr1_2L
+ .align 16, 0x90
+ .type mcl_fp_shr1_2L,@function
+mcl_fp_shr1_2L: # @mcl_fp_shr1_2L
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %ecx
+ movl 4(%esp), %edx
+ movl %ecx, (%edx)
+ shrl %eax
+ movl %eax, 4(%edx)
+ retl
+.Lfunc_end28:
+ .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L
+
+ .globl mcl_fp_add2L
+ .align 16, 0x90
+ .type mcl_fp_add2L,@function
+mcl_fp_add2L: # @mcl_fp_add2L
+# BB#0:
+ pushl %ebx
+ pushl %esi
+ movl 20(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %ecx
+ movl 16(%esp), %esi
+ addl (%esi), %eax
+ movl 12(%esp), %edx
+ adcl 4(%esi), %ecx
+ movl %eax, (%edx)
+ movl %ecx, 4(%edx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 24(%esp), %esi
+ subl (%esi), %eax
+ sbbl 4(%esi), %ecx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB29_2
+# BB#1: # %nocarry
+ movl %eax, (%edx)
+ movl %ecx, 4(%edx)
+.LBB29_2: # %carry
+ popl %esi
+ popl %ebx
+ retl
+.Lfunc_end29:
+ .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L
+
+ .globl mcl_fp_addNF2L
+ .align 16, 0x90
+ .type mcl_fp_addNF2L,@function
+mcl_fp_addNF2L: # @mcl_fp_addNF2L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ movl 16(%esp), %edx
+ addl (%edx), %ecx
+ adcl 4(%edx), %eax
+ movl 24(%esp), %edi
+ movl %ecx, %esi
+ subl (%edi), %esi
+ movl %eax, %edx
+ sbbl 4(%edi), %edx
+ testl %edx, %edx
+ js .LBB30_2
+# BB#1:
+ movl %esi, %ecx
+.LBB30_2:
+ movl 12(%esp), %esi
+ movl %ecx, (%esi)
+ js .LBB30_4
+# BB#3:
+ movl %edx, %eax
+.LBB30_4:
+ movl %eax, 4(%esi)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end30:
+ .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L
+
+ .globl mcl_fp_sub2L
+ .align 16, 0x90
+ .type mcl_fp_sub2L,@function
+mcl_fp_sub2L: # @mcl_fp_sub2L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ xorl %ebx, %ebx
+ movl 24(%esp), %edx
+ subl (%edx), %ecx
+ sbbl 4(%edx), %eax
+ movl 16(%esp), %edx
+ movl %ecx, (%edx)
+ movl %eax, 4(%edx)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB31_2
+# BB#1: # %carry
+ movl 28(%esp), %esi
+ movl 4(%esi), %edi
+ addl (%esi), %ecx
+ movl %ecx, (%edx)
+ adcl %eax, %edi
+ movl %edi, 4(%edx)
+.LBB31_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end31:
+ .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L
+
+ .globl mcl_fp_subNF2L
+ .align 16, 0x90
+ .type mcl_fp_subNF2L,@function
+mcl_fp_subNF2L: # @mcl_fp_subNF2L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ movl 20(%esp), %edx
+ subl (%edx), %ecx
+ sbbl 4(%edx), %eax
+ movl %eax, %edx
+ sarl $31, %edx
+ movl 24(%esp), %esi
+ movl 4(%esi), %edi
+ andl %edx, %edi
+ andl (%esi), %edx
+ addl %ecx, %edx
+ movl 12(%esp), %ecx
+ movl %edx, (%ecx)
+ adcl %eax, %edi
+ movl %edi, 4(%ecx)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end32:
+ .size mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L
+
+ .globl mcl_fpDbl_add2L
+ .align 16, 0x90
+ .type mcl_fpDbl_add2L,@function
+mcl_fpDbl_add2L: # @mcl_fpDbl_add2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %edx
+ movl 12(%edx), %esi
+ movl 24(%esp), %edi
+ movl 12(%edi), %eax
+ movl 8(%edx), %ecx
+ movl (%edx), %ebx
+ movl 4(%edx), %ebp
+ addl (%edi), %ebx
+ adcl 4(%edi), %ebp
+ movl 20(%esp), %edx
+ adcl 8(%edi), %ecx
+ movl %ebx, (%edx)
+ movl %ebp, 4(%edx)
+ adcl %esi, %eax
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 32(%esp), %ebp
+ movl %ecx, %esi
+ subl (%ebp), %esi
+ movl %eax, %edi
+ sbbl 4(%ebp), %edi
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB33_2
+# BB#1:
+ movl %edi, %eax
+.LBB33_2:
+ testb %bl, %bl
+ jne .LBB33_4
+# BB#3:
+ movl %esi, %ecx
+.LBB33_4:
+ movl %ecx, 8(%edx)
+ movl %eax, 12(%edx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end33:
+ .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L
+
+ .globl mcl_fpDbl_sub2L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub2L,@function
+mcl_fpDbl_sub2L: # @mcl_fpDbl_sub2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %ebx, %ebx
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %eax
+ sbbl 8(%edx), %eax
+ movl 12(%edx), %ebp
+ movl 12(%ecx), %edx
+ movl 20(%esp), %ecx
+ movl %esi, (%ecx)
+ movl %edi, 4(%ecx)
+ sbbl %ebp, %edx
+ movl 32(%esp), %edi
+ movl (%edi), %esi
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB34_1
+# BB#2:
+ xorl %edi, %edi
+ jmp .LBB34_3
+.LBB34_1:
+ movl 4(%edi), %edi
+.LBB34_3:
+ testb %bl, %bl
+ jne .LBB34_5
+# BB#4:
+ xorl %esi, %esi
+.LBB34_5:
+ addl %eax, %esi
+ movl %esi, 8(%ecx)
+ adcl %edx, %edi
+ movl %edi, 12(%ecx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end34:
+ .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L
+
+ .globl mcl_fp_mulUnitPre3L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre3L,@function
+mcl_fp_mulUnitPre3L: # @mcl_fp_mulUnitPre3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ pushl %eax
+ movl 32(%esp), %ecx
+ movl 28(%esp), %edi
+ movl %ecx, %eax
+ mull 8(%edi)
+ movl %edx, %esi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 4(%edi)
+ movl %edx, %ebx
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull (%edi)
+ movl 24(%esp), %ecx
+ movl %eax, (%ecx)
+ addl %ebp, %edx
+ movl %edx, 4(%ecx)
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%ecx)
+ adcl $0, %esi
+ movl %esi, 12(%ecx)
+ addl $4, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end35:
+ .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L
+
+ .globl mcl_fpDbl_mulPre3L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre3L,@function
+mcl_fpDbl_mulPre3L: # @mcl_fpDbl_mulPre3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 52(%esp), %ecx
+ movl (%ecx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %edx
+ movl (%edx), %edi
+ mull %edi
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%esp), %edx
+ movl %eax, (%edx)
+ movl 4(%ecx), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl 8(%ecx), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %edi
+ movl %edx, %ecx
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %edi
+ movl %edx, %edi
+ movl %eax, %ebx
+ addl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 56(%esp), %eax
+ movl 4(%eax), %ecx
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 4(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ movl 48(%esp), %ecx
+ movl %eax, 4(%ecx)
+ adcl %edi, %ebp
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl 56(%esp), %eax
+ movl 8(%eax), %edi
+ sbbl %ecx, %ecx
+ movl (%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 8(%esp), %eax # 4-byte Reload
+ mull %edi
+ andl $1, %ecx
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ addl %ebx, %ebp
+ movl 48(%esp), %edi
+ movl %ebp, 8(%edi)
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl %eax, %ecx
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %esi, 12(%edi)
+ movl %ecx, 16(%edi)
+ adcl %edx, %eax
+ movl %eax, 20(%edi)
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end36:
+ .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L
+
+ .globl mcl_fpDbl_sqrPre3L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre3L,@function
+mcl_fpDbl_sqrPre3L: # @mcl_fpDbl_sqrPre3L
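+# 3-limb squaring: the three cross products x[i]*x[j] (i < j) are computed once
+# and added twice, so only six single-word multiplies are needed.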
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %eax
+ movl 8(%eax), %ebp
+ movl (%eax), %ecx
+ movl 4(%eax), %esi
+ movl %ebp, %eax
+ mull %esi
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %esi
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, %esi
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull %ecx
+ movl %edx, %ecx
+ movl 52(%esp), %edx
+ movl %eax, (%edx)
+ movl %ebp, %eax
+ mull %ebp
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl %edi, %ecx
+ movl %esi, %ebp
+ adcl %ebx, %ebp
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, %eax
+ adcl $0, %eax
+ addl %edi, %ecx
+ movl 52(%esp), %edx
+ movl %ecx, 4(%edx)
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl %edx, %eax
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl %esi, %ebp
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl %edi, %ecx
+ addl (%esp), %ebp # 4-byte Folded Reload
+ movl 52(%esp), %esi
+ movl %ebp, 8(%esi)
+ adcl %edx, %eax
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl %ebx, %eax
+ adcl %edi, %ecx
+ movl 52(%esp), %edx
+ movl %eax, 12(%edx)
+ movl %ecx, 16(%edx)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%edx)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end37:
+ .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L
+
+ .globl mcl_fp_mont3L
+ .align 16, 0x90
+ .type mcl_fp_mont3L,@function
+mcl_fp_mont3L: # @mcl_fp_mont3L
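+# Three-limb Montgomery multiplication, same word-serial structure as the 2-limb
+# routine above, ending with a conditional subtraction of the modulus.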
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 80(%esp), %ecx
+ movl (%ecx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 84(%esp), %edx
+ movl (%edx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ mull %edx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 88(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull %edx, %ebp
+ movl (%esi), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 8(%esi), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 4(%esi), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 4(%ecx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 8(%ecx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ebx
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %edi
+ movl %edx, %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl 12(%esp), %ecx # 4-byte Reload
+ mull %ecx
+ movl %edx, %esi
+ movl %eax, %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %eax, %ecx
+ addl 16(%esp), %ecx # 4-byte Folded Reload
+ adcl %edi, %edx
+ adcl $0, %esi
+ addl (%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 4(%esp), %edi # 4-byte Reload
+ addl 32(%esp), %edi # 4-byte Folded Reload
+ adcl %ecx, %ebp
+ adcl %edx, %ebx
+ adcl %esi, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl 4(%eax), %ecx
+ movl %ecx, %eax
+ mull 20(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 36(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 24(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ addl 12(%esp), %edi # 4-byte Folded Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %eax, %edx
+ addl %ebp, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %edx, %esi
+ imull 52(%esp), %esi # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 40(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ addl 4(%esp), %ecx # 4-byte Folded Reload
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ adcl %edi, %ecx
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax
+ movl 8(%eax), %esi
+ movl %esi, %eax
+ mull 20(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 36(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 24(%esp) # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edx, %esi
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 36(%esp), %edx # 4-byte Reload
+ addl %ecx, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %ebx, %esi
+ adcl %ebp, %edi
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ movl 52(%esp), %ebp # 4-byte Reload
+ imull %edx, %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ andl $1, %ecx
+ movl %ebp, %eax
+ mull 40(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ mull 44(%esp) # 4-byte Folded Reload
+ addl 28(%esp), %eax # 4-byte Folded Reload
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl %esi, %eax
+ adcl %edi, %edx
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %ecx
+ movl %eax, %ebx
+ subl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, %edi
+ sbbl 44(%esp), %edi # 4-byte Folded Reload
+ movl %ebp, %esi
+ sbbl 48(%esp), %esi # 4-byte Folded Reload
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB38_2
+# BB#1:
+ movl %ebx, %eax
+.LBB38_2:
+ movl 76(%esp), %ebx
+ movl %eax, (%ebx)
+ testb %cl, %cl
+ jne .LBB38_4
+# BB#3:
+ movl %edi, %edx
+.LBB38_4:
+ movl %edx, 4(%ebx)
+ jne .LBB38_6
+# BB#5:
+ movl %esi, %ebp
+.LBB38_6:
+ movl %ebp, 8(%ebx)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end38:
+ .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L
+
+ .globl mcl_fp_montNF3L
+ .align 16, 0x90
+ .type mcl_fp_montNF3L,@function
+mcl_fp_montNF3L: # @mcl_fp_montNF3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %ebp
+ movl (%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx
+ movl (%ecx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ imull %edx, %ecx
+ movl (%esi), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 8(%esi), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 4(%esi), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 4(%ebp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 8(%ebp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %esi, %eax
+ movl 20(%esp), %ecx # 4-byte Reload
+ mull %ecx
+ movl %edx, %edi
+ movl %eax, %ebx
+ movl 40(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, %ecx
+ movl %eax, %esi
+ addl 36(%esp), %esi # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ adcl $0, %edi
+ addl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 4(%esp), %esi # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl 88(%esp), %eax
+ movl 4(%eax), %ebx
+ movl %ebx, %eax
+ mull 24(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 40(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 28(%esp) # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ addl 16(%esp), %ebx # 4-byte Folded Reload
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %ebp
+ movl 20(%esp), %edx # 4-byte Reload
+ addl %esi, %edx
+ adcl %ecx, %ebx
+ adcl %edi, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %edx, %ecx
+ movl %edx, %edi
+ imull 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ addl %edi, %eax
+ adcl %ebx, %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl %edx, %esi
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl 88(%esp), %eax
+ movl 8(%eax), %edi
+ movl %edi, %eax
+ mull 24(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 40(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 28(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %ebx
+ addl %esi, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ adcl %ebp, %ecx
+ adcl $0, %ebx
+ movl 56(%esp), %esi # 4-byte Reload
+ imull %eax, %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %esi, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ mull 48(%esp) # 4-byte Folded Reload
+ addl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl %edi, %eax
+ adcl %ecx, %esi
+ adcl $0, %ebx
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ adcl %edx, %esi
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %eax, %edi
+ subl 44(%esp), %edi # 4-byte Folded Reload
+ movl %esi, %edx
+ sbbl 48(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, %ecx
+ sbbl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ js .LBB39_2
+# BB#1:
+ movl %edi, %eax
+.LBB39_2:
+ movl 80(%esp), %edi
+ movl %eax, (%edi)
+ js .LBB39_4
+# BB#3:
+ movl %edx, %esi
+.LBB39_4:
+ movl %esi, 4(%edi)
+ js .LBB39_6
+# BB#5:
+ movl %ecx, %ebx
+.LBB39_6:
+ movl %ebx, 8(%edi)
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end39:
+ .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L
+
+ .globl mcl_fp_montRed3L
+ .align 16, 0x90
+ .type mcl_fp_montRed3L,@function
+mcl_fp_montRed3L: # @mcl_fp_montRed3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 68(%esp), %eax
+ movl -4(%eax), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl (%eax), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx
+ movl (%ebx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ imull %edx, %ecx
+ movl 8(%eax), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 4(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, %ecx
+ addl %ebp, %ecx
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 16(%esp), %eax # 4-byte Folded Reload
+ adcl 4(%ebx), %ecx
+ adcl 8(%ebx), %esi
+ adcl 12(%ebx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ movl 16(%ebx), %edx
+ adcl $0, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl %ecx, %edi
+ imull 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ mull 32(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 28(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 24(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ addl (%esp), %edi # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ecx, %eax
+ adcl %esi, %edi
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl $0, 12(%esp) # 4-byte Folded Spill
+ adcl $0, %ebx
+ movl 36(%esp), %ecx # 4-byte Reload
+ imull %edi, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 32(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 24(%esp) # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ mull 28(%esp) # 4-byte Folded Reload
+ addl 8(%esp), %eax # 4-byte Folded Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %edi, %ecx
+ adcl %ebp, %eax
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl %eax, %ebp
+ subl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, %edi
+ sbbl 28(%esp), %edi # 4-byte Folded Reload
+ movl %esi, %ecx
+ sbbl 32(%esp), %ecx # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB40_2
+# BB#1:
+ movl %ebp, %eax
+.LBB40_2:
+ movl 60(%esp), %ebp
+ movl %eax, (%ebp)
+ testb %bl, %bl
+ jne .LBB40_4
+# BB#3:
+ movl %edi, %edx
+.LBB40_4:
+ movl %edx, 4(%ebp)
+ jne .LBB40_6
+# BB#5:
+ movl %ecx, %esi
+.LBB40_6:
+ movl %esi, 8(%ebp)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end40:
+ .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L
+
+ .globl mcl_fp_addPre3L
+ .align 16, 0x90
+ .type mcl_fp_addPre3L,@function
+mcl_fp_addPre3L: # @mcl_fp_addPre3L
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 12(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %eax
+ adcl 8(%esi), %eax
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl %edx, 4(%esi)
+ movl %eax, 8(%esi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end41:
+ .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L
+
+ .globl mcl_fp_subPre3L
+ .align 16, 0x90
+ .type mcl_fp_subPre3L,@function
+mcl_fp_subPre3L: # @mcl_fp_subPre3L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ xorl %eax, %eax
+ movl 20(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl 12(%esp), %edi
+ movl %edx, (%edi)
+ movl %esi, 4(%edi)
+ movl %ecx, 8(%edi)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end42:
+ .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L
+
+ .globl mcl_fp_shr1_3L
+ .align 16, 0x90
+ .type mcl_fp_shr1_3L,@function
+mcl_fp_shr1_3L: # @mcl_fp_shr1_3L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl 8(%eax), %ecx
+ movl (%eax), %edx
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl 8(%esp), %esi
+ movl %edx, (%esi)
+ shrdl $1, %ecx, %eax
+ movl %eax, 4(%esi)
+ shrl %ecx
+ movl %ecx, 8(%esi)
+ popl %esi
+ retl
+.Lfunc_end43:
+ .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L
+
+ .globl mcl_fp_add3L
+ .align 16, 0x90
+ .type mcl_fp_add3L,@function
+mcl_fp_add3L: # @mcl_fp_add3L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %ecx
+ movl 20(%esp), %esi
+ addl (%esi), %eax
+ adcl 4(%esi), %ecx
+ movl 8(%edx), %edx
+ adcl 8(%esi), %edx
+ movl 16(%esp), %esi
+ movl %eax, (%esi)
+ movl %ecx, 4(%esi)
+ movl %edx, 8(%esi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 28(%esp), %edi
+ subl (%edi), %eax
+ sbbl 4(%edi), %ecx
+ sbbl 8(%edi), %edx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB44_2
+# BB#1: # %nocarry
+ movl %eax, (%esi)
+ movl %ecx, 4(%esi)
+ movl %edx, 8(%esi)
+.LBB44_2: # %carry
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end44:
+ .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L
+
+ .globl mcl_fp_addNF3L
+ .align 16, 0x90
+ .type mcl_fp_addNF3L,@function
+mcl_fp_addNF3L: # @mcl_fp_addNF3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 24(%esp), %esi
+ addl (%esi), %edx
+ adcl 4(%esi), %ecx
+ movl 8(%eax), %eax
+ adcl 8(%esi), %eax
+ movl 32(%esp), %ebp
+ movl %edx, %ebx
+ subl (%ebp), %ebx
+ movl %ecx, %edi
+ sbbl 4(%ebp), %edi
+ movl %eax, %esi
+ sbbl 8(%ebp), %esi
+ movl %esi, %ebp
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ js .LBB45_2
+# BB#1:
+ movl %ebx, %edx
+.LBB45_2:
+ movl 20(%esp), %ebx
+ movl %edx, (%ebx)
+ js .LBB45_4
+# BB#3:
+ movl %edi, %ecx
+.LBB45_4:
+ movl %ecx, 4(%ebx)
+ js .LBB45_6
+# BB#5:
+ movl %esi, %eax
+.LBB45_6:
+ movl %eax, 8(%ebx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end45:
+ .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L
+
+ .globl mcl_fp_sub3L
+ .align 16, 0x90
+ .type mcl_fp_sub3L,@function
+mcl_fp_sub3L: # @mcl_fp_sub3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %edx
+ movl (%edx), %ecx
+ movl 4(%edx), %eax
+ xorl %ebx, %ebx
+ movl 28(%esp), %esi
+ subl (%esi), %ecx
+ sbbl 4(%esi), %eax
+ movl 8(%edx), %edx
+ sbbl 8(%esi), %edx
+ movl 20(%esp), %esi
+ movl %ecx, (%esi)
+ movl %eax, 4(%esi)
+ movl %edx, 8(%esi)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB46_2
+# BB#1: # %carry
+ movl 32(%esp), %edi
+ movl 4(%edi), %ebx
+ movl 8(%edi), %ebp
+ addl (%edi), %ecx
+ movl %ecx, (%esi)
+ adcl %eax, %ebx
+ movl %ebx, 4(%esi)
+ adcl %edx, %ebp
+ movl %ebp, 8(%esi)
+.LBB46_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end46:
+ .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L
+
+ .globl mcl_fp_subNF3L
+ .align 16, 0x90
+ .type mcl_fp_subNF3L,@function
+mcl_fp_subNF3L: # @mcl_fp_subNF3L
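+# Branch-free 3-limb subtraction: the sign of the difference becomes a mask used
+# to add either 0 or the modulus back in.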
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 28(%esp), %esi
+ subl (%esi), %ecx
+ sbbl 4(%esi), %edx
+ movl 8(%eax), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, %esi
+ sarl $31, %esi
+ movl %esi, %edi
+ shldl $1, %eax, %edi
+ movl 32(%esp), %ebx
+ andl (%ebx), %edi
+ movl 8(%ebx), %ebp
+ andl %esi, %ebp
+ andl 4(%ebx), %esi
+ addl %ecx, %edi
+ adcl %edx, %esi
+ movl 20(%esp), %ecx
+ movl %edi, (%ecx)
+ movl %esi, 4(%ecx)
+ adcl %eax, %ebp
+ movl %ebp, 8(%ecx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end47:
+ .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L
+
+ .globl mcl_fpDbl_add3L
+ .align 16, 0x90
+ .type mcl_fpDbl_add3L,@function
+mcl_fpDbl_add3L: # @mcl_fpDbl_add3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ pushl %eax
+ movl 32(%esp), %esi
+ movl 20(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 16(%esi), %edi
+ movl 12(%esi), %ebx
+ movl (%esi), %edx
+ movl 28(%esp), %eax
+ addl (%eax), %edx
+ movl 24(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%esi), %edx
+ movl 4(%esi), %esi
+ adcl 4(%eax), %esi
+ adcl 8(%eax), %edx
+ movl %esi, 4(%ecx)
+ movl 20(%eax), %ebp
+ movl %edx, 8(%ecx)
+ movl 12(%eax), %esi
+ movl 16(%eax), %edx
+ adcl %ebx, %esi
+ adcl %edi, %edx
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 36(%esp), %ecx
+ movl %esi, %ebx
+ subl (%ecx), %ebx
+ movl %edx, %edi
+ sbbl 4(%ecx), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl %ebp, %ecx
+ movl 36(%esp), %edi
+ sbbl 8(%edi), %ecx
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB48_2
+# BB#1:
+ movl %ecx, %ebp
+.LBB48_2:
+ testb %al, %al
+ jne .LBB48_4
+# BB#3:
+ movl %ebx, %esi
+.LBB48_4:
+ movl 24(%esp), %eax
+ movl %esi, 12(%eax)
+ jne .LBB48_6
+# BB#5:
+ movl (%esp), %edx # 4-byte Reload
+.LBB48_6:
+ movl %edx, 16(%eax)
+ movl %ebp, 20(%eax)
+ addl $4, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end48:
+ .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L
+
+ .globl mcl_fpDbl_sub3L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub3L,@function
+mcl_fpDbl_sub3L: # @mcl_fpDbl_sub3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ movl 28(%esp), %ebx
+ subl (%ebx), %edx
+ sbbl 4(%ebx), %esi
+ movl 8(%ecx), %ebp
+ sbbl 8(%ebx), %ebp
+ movl 20(%esp), %eax
+ movl %edx, (%eax)
+ movl 12(%ecx), %edi
+ sbbl 12(%ebx), %edi
+ movl %esi, 4(%eax)
+ movl 16(%ecx), %esi
+ sbbl 16(%ebx), %esi
+ movl 20(%ebx), %ebx
+ movl 20(%ecx), %edx
+ movl %ebp, 8(%eax)
+ sbbl %ebx, %edx
+ movl $0, %ecx
+ sbbl $0, %ecx
+ andl $1, %ecx
+ movl 32(%esp), %ebp
+ jne .LBB49_1
+# BB#2:
+ xorl %ebx, %ebx
+ jmp .LBB49_3
+.LBB49_1:
+ movl 8(%ebp), %ebx
+.LBB49_3:
+ testb %cl, %cl
+ movl $0, %eax
+ jne .LBB49_4
+# BB#5:
+ xorl %ecx, %ecx
+ jmp .LBB49_6
+.LBB49_4:
+ movl (%ebp), %ecx
+ movl 4(%ebp), %eax
+.LBB49_6:
+ addl %edi, %ecx
+ adcl %esi, %eax
+ movl 20(%esp), %esi
+ movl %ecx, 12(%esi)
+ movl %eax, 16(%esi)
+ adcl %edx, %ebx
+ movl %ebx, 20(%esi)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end49:
+ .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L
+
+ .globl mcl_fp_mulUnitPre4L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre4L,@function
+mcl_fp_mulUnitPre4L: # @mcl_fp_mulUnitPre4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $12, %esp
+ movl 40(%esp), %ecx
+ movl 36(%esp), %ebp
+ movl %ecx, %eax
+ mull 12(%ebp)
+ movl %edx, %esi
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 8(%ebp)
+ movl %edx, %ebx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 4(%ebp)
+ movl %edx, %edi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull (%ebp)
+ movl 32(%esp), %ecx
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%ecx)
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%ecx)
+ adcl $0, %esi
+ movl %esi, 16(%ecx)
+ addl $12, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end50:
+ .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L
+
+ .globl mcl_fpDbl_mulPre4L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre4L,@function
+mcl_fpDbl_mulPre4L: # @mcl_fpDbl_mulPre4L
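+# Schoolbook 4x4-limb multiplication producing the full 8-limb product.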
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 80(%esp), %edi
+ movl (%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx
+ movl (%ecx), %esi
+ movl %ecx, %ebp
+ mull %esi
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 4(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 8(%edi), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 12(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 4(%ebp), %ecx
+ movl %eax, %ebp
+ mull %ecx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, %eax
+ mull %ecx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %esi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ebx, %eax
+ mull %esi
+ movl %edx, %ecx
+ movl %eax, %ebx
+ movl %edi, %eax
+ mull %esi
+ movl %edx, %edi
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ adcl %ebx, %edi
+ adcl %ebp, %ecx
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl (%esp), %eax # 4-byte Folded Reload
+ movl 76(%esp), %edx
+ movl %eax, 4(%edx)
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 8(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl 8(%eax), %esi
+ movl 20(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 40(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 28(%esp) # 4-byte Spill
+ addl %edi, %eax
+ movl 76(%esp), %edx
+ movl %eax, 8(%edx)
+ adcl %ecx, %ebp
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax
+ movl 12(%eax), %esi
+ sbbl %ecx, %ecx
+ movl %esi, %eax
+ movl 80(%esp), %edi
+ mull 12(%edi)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 8(%edi)
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 4(%edi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %esi, %eax
+ movl 80(%esp), %edx
+ mull (%edx)
+ movl %eax, %esi
+ andl $1, %ecx
+ addl 28(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ addl %esi, %ebp
+ movl 76(%esp), %esi
+ movl %ebp, 12(%esi)
+ adcl %edi, %ebx
+ movl %eax, %edi
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %edx, %ebx
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %esi, %edx
+ movl %ebx, 16(%edx)
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 20(%edx)
+ movl %ecx, 24(%edx)
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%edx)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end51:
+ .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L
+
+ .globl mcl_fpDbl_sqrPre4L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre4L,@function
+mcl_fpDbl_sqrPre4L: # @mcl_fpDbl_sqrPre4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %ecx
+ movl 12(%ecx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl (%ecx), %ebx
+ movl 4(%ecx), %esi
+ movl %ebp, %eax
+ mull %esi
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 8(%ecx), %edi
+ movl %edi, %eax
+ mull %esi
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ebx
+ movl %edx, %ebp
+ movl %eax, %ecx
+ movl %edi, %eax
+ mull %ebx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %esi
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %edx, (%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebx, %eax
+ mull %ebx
+ movl 60(%esp), %ebx
+ movl %eax, (%ebx)
+ addl %esi, %edx
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, %ebx
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl %esi, %edx
+ movl 60(%esp), %esi
+ movl %edx, 4(%esi)
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl %eax, %ebx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %edi
+ movl %eax, %edi
+ addl 20(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %eax
+ movl %ebx, 8(%eax)
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl %ebp, %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl %esi, %eax
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl 28(%esp), %ecx # 4-byte Folded Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl %edx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %esi
+ movl 12(%esi), %ebp
+ movl %ebp, %eax
+ mull 8(%esi)
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 4(%esi)
+ movl %esi, %edi
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebp, %eax
+ mull (%edi)
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull %ebp
+ addl %ecx, %edi
+ movl 60(%esp), %ebp
+ movl %edi, 12(%ebp)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, %edi
+ movl %esi, 16(%edi)
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%edi)
+ movl %eax, 24(%edi)
+ adcl %edx, %ecx
+ movl %ecx, 28(%edi)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end52:
+ .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L
+
+ .globl mcl_fp_mont4L
+ .align 16, 0x90
+ .type mcl_fp_mont4L,@function
+mcl_fp_mont4L: # @mcl_fp_mont4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 112(%esp), %ecx
+ movl (%ecx), %eax
+ movl %ecx, %ebp
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 116(%esp), %edx
+ movl (%edx), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ mull %edx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 120(%esp), %edi
+ movl -4(%edi), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ imull %edx, %ebx
+ movl (%edi), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 12(%edi), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 8(%edi), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 4(%edi), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ movl 4(%edi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 12(%edi), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 8(%edi), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %esi
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl 28(%esp), %esi # 4-byte Reload
+ mull %esi
+ movl %edx, %ecx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %esi
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, %ebx
+ movl %eax, %edi
+ addl 32(%esp), %edi # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 28(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ addl 8(%esp), %esi # 4-byte Folded Reload
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 12(%esp), %ebp # 4-byte Reload
+ addl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl %edi, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl %ebx, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebx
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ movl 116(%esp), %eax
+ movl 4(%eax), %esi
+ movl %esi, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, %edx
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl %ebp, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %eax, %ecx
+ imull 80(%esp), %ecx # 4-byte Folded Reload
+ andl $1, %esi
+ movl %ecx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ addl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 28(%esp), %eax # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl 8(%eax), %esi
+ movl %esi, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ addl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, %eax
+ adcl $0, %eax
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl %edi, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl %ebp, 32(%esp) # 4-byte Folded Spill
+ adcl %ebx, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %ecx, %esi
+ imull 80(%esp), %esi # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, %ecx
+ movl %esi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, %edi
+ movl %esi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ addl %edi, %esi
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl 12(%eax), %ebp
+ movl %ebp, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ addl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edi, %eax
+ adcl $0, %eax
+ movl 64(%esp), %edi # 4-byte Reload
+ addl %esi, %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl %ebx, %ebp
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ movl 80(%esp), %esi # 4-byte Reload
+ imull %edi, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ andl $1, %ebx
+ movl %esi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %esi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ mull 68(%esp) # 4-byte Folded Reload
+ addl 44(%esp), %eax # 4-byte Folded Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 64(%esp), %ecx # 4-byte Folded Reload
+ adcl %ebp, %eax
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl %eax, %ebp
+ subl 84(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, %ecx
+ sbbl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ sbbl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %edi, %ecx
+ sbbl 76(%esp), %ecx # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB53_2
+# BB#1:
+ movl %ebp, %eax
+.LBB53_2:
+ movl 108(%esp), %ebp
+ movl %eax, (%ebp)
+ testb %bl, %bl
+ jne .LBB53_4
+# BB#3:
+ movl 80(%esp), %edx # 4-byte Reload
+.LBB53_4:
+ movl %edx, 4(%ebp)
+ jne .LBB53_6
+# BB#5:
+ movl 84(%esp), %esi # 4-byte Reload
+.LBB53_6:
+ movl %esi, 8(%ebp)
+ jne .LBB53_8
+# BB#7:
+ movl %ecx, %edi
+.LBB53_8:
+ movl %edi, 12(%ebp)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end53:
+ .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L
+
+ .globl mcl_fp_montNF4L
+ .align 16, 0x90
+ .type mcl_fp_montNF4L,@function
+mcl_fp_montNF4L: # @mcl_fp_montNF4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $84, %esp
+ movl 108(%esp), %ecx
+ movl (%ecx), %eax
+ movl %ecx, %ebp
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl (%ecx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ imull %edx, %ecx
+ movl (%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 12(%esi), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 8(%esi), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 4(%esi), %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl 4(%eax), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 12(%eax), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 8(%eax), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ movl 36(%esp), %ebx # 4-byte Reload
+ mull %ebx
+ movl %edx, %ecx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %ebx, %esi
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, %esi
+ movl %eax, %ebp
+ addl 76(%esp), %ebp # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ecx
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ecx
+ addl 12(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 4(%eax), %edi
+ movl %edi, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edx, %edi
+ addl 20(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %ebp, 32(%esp) # 4-byte Folded Spill
+ adcl %esi, %edi
+ adcl %ebx, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ movl %esi, %ecx
+ imull 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ addl %esi, %eax
+ adcl %edi, %ebx
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edx, %ebx
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 28(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %ecx, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ addl %edi, %ecx
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %esi
+ movl 32(%esp), %edx # 4-byte Reload
+ addl %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %edx, %ebx
+ imull 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ addl 32(%esp), %eax # 4-byte Folded Reload
+ adcl %ecx, %edi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl %ebp, %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %edx, %edi
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl 112(%esp), %eax
+ movl 12(%eax), %ecx
+ movl %ecx, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl 60(%esp), %edx # 4-byte Reload
+ addl %edi, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl %esi, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl 56(%esp), %edi # 4-byte Reload
+ imull %edx, %edi
+ movl %edi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl %edi, %ebp
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ addl 60(%esp), %ebp # 4-byte Folded Reload
+ adcl %ecx, %eax
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %ebx
+ addl 56(%esp), %eax # 4-byte Folded Reload
+ adcl %edx, %edi
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ adcl 76(%esp), %ebx # 4-byte Folded Reload
+ movl %eax, %edx
+ subl 80(%esp), %edx # 4-byte Folded Reload
+ movl %edi, %ebp
+ sbbl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %esi, %ecx
+ sbbl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl %ebx, %ecx
+ sbbl 72(%esp), %ecx # 4-byte Folded Reload
+ testl %ecx, %ecx
+ js .LBB54_2
+# BB#1:
+ movl %edx, %eax
+.LBB54_2:
+ movl 104(%esp), %edx
+ movl %eax, (%edx)
+ js .LBB54_4
+# BB#3:
+ movl %ebp, %edi
+.LBB54_4:
+ movl %edi, 4(%edx)
+ js .LBB54_6
+# BB#5:
+ movl 80(%esp), %esi # 4-byte Reload
+.LBB54_6:
+ movl %esi, 8(%edx)
+ js .LBB54_8
+# BB#7:
+ movl %ecx, %ebx
+.LBB54_8:
+ movl %ebx, 12(%edx)
+ addl $84, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end54:
+ .size mcl_fp_montNF4L, .Lfunc_end54-mcl_fp_montNF4L
+
+ .globl mcl_fp_montRed4L
+ .align 16, 0x90
+ .type mcl_fp_montRed4L,@function
+mcl_fp_montRed4L: # @mcl_fp_montRed4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+ movl 92(%esp), %eax
+ movl -4(%eax), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl (%eax), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx
+ movl (%ecx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ imull %edx, %esi
+ movl 12(%eax), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 8(%eax), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 4(%eax), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebp
+ movl %edx, %ebp
+ movl %eax, %ebx
+ movl %esi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ addl %ebx, %edi
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ adcl 4(%ecx), %edi
+ adcl 8(%ecx), %ebp
+ adcl 12(%ecx), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 16(%ecx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl 24(%ecx), %edx
+ movl 20(%ecx), %ecx
+ adcl $0, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ imull 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ addl (%esp), %esi # 4-byte Folded Reload
+ movl %ecx, %ebx
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %edi, %eax
+ adcl %ebp, %esi
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 8(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %esi, %ebp
+ imull 56(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ movl %ecx, %ebx
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %esi, %eax
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl 56(%esp), %esi # 4-byte Reload
+ imull %ebp, %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ mull 48(%esp) # 4-byte Folded Reload
+ addl 16(%esp), %eax # 4-byte Folded Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ecx
+ addl %ebp, %esi
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %edi
+ movl %eax, %ebp
+ subl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, %esi
+ sbbl 48(%esp), %esi # 4-byte Folded Reload
+ sbbl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ sbbl 44(%esp), %ebx # 4-byte Folded Reload
+ sbbl $0, %edi
+ andl $1, %edi
+ jne .LBB55_2
+# BB#1:
+ movl %ebp, %eax
+.LBB55_2:
+ movl 84(%esp), %ebp
+ movl %eax, (%ebp)
+ movl %edi, %eax
+ testb %al, %al
+ jne .LBB55_4
+# BB#3:
+ movl %esi, %edx
+.LBB55_4:
+ movl %edx, 4(%ebp)
+ movl 56(%esp), %eax # 4-byte Reload
+ jne .LBB55_6
+# BB#5:
+ movl 60(%esp), %eax # 4-byte Reload
+.LBB55_6:
+ movl %eax, 8(%ebp)
+ jne .LBB55_8
+# BB#7:
+ movl %ebx, %ecx
+.LBB55_8:
+ movl %ecx, 12(%ebp)
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end55:
+ .size mcl_fp_montRed4L, .Lfunc_end55-mcl_fp_montRed4L
+
+ .globl mcl_fp_addPre4L
+ .align 16, 0x90
+ .type mcl_fp_addPre4L,@function
+mcl_fp_addPre4L: # @mcl_fp_addPre4L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 20(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 12(%eax), %edi
+ movl 8(%eax), %eax
+ adcl 8(%esi), %eax
+ movl 12(%esi), %esi
+ movl 16(%esp), %ebx
+ movl %ecx, (%ebx)
+ movl %edx, 4(%ebx)
+ movl %eax, 8(%ebx)
+ adcl %edi, %esi
+ movl %esi, 12(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end56:
+ .size mcl_fp_addPre4L, .Lfunc_end56-mcl_fp_addPre4L
+
+ .globl mcl_fp_subPre4L
+ .align 16, 0x90
+ .type mcl_fp_subPre4L,@function
+mcl_fp_subPre4L: # @mcl_fp_subPre4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ xorl %eax, %eax
+ movl 28(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edi), %ebx
+ movl 12(%edi), %edi
+ movl 12(%ecx), %ecx
+ movl 20(%esp), %ebp
+ movl %edx, (%ebp)
+ movl %esi, 4(%ebp)
+ movl %ebx, 8(%ebp)
+ sbbl %edi, %ecx
+ movl %ecx, 12(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end57:
+ .size mcl_fp_subPre4L, .Lfunc_end57-mcl_fp_subPre4L
+
+ .globl mcl_fp_shr1_4L
+ .align 16, 0x90
+ .type mcl_fp_shr1_4L,@function
+mcl_fp_shr1_4L: # @mcl_fp_shr1_4L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl 12(%eax), %ecx
+ movl 8(%eax), %edx
+ movl (%eax), %esi
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %esi
+ movl 12(%esp), %edi
+ movl %esi, (%edi)
+ shrdl $1, %edx, %eax
+ movl %eax, 4(%edi)
+ shrdl $1, %ecx, %edx
+ movl %edx, 8(%edi)
+ shrl %ecx
+ movl %ecx, 12(%edi)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end58:
+ .size mcl_fp_shr1_4L, .Lfunc_end58-mcl_fp_shr1_4L
+
+ .globl mcl_fp_add4L
+ .align 16, 0x90
+ .type mcl_fp_add4L,@function
+mcl_fp_add4L: # @mcl_fp_add4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ movl 24(%esp), %esi
+ addl (%esi), %eax
+ adcl 4(%esi), %ecx
+ movl 8(%edi), %edx
+ adcl 8(%esi), %edx
+ movl 12(%esi), %esi
+ adcl 12(%edi), %esi
+ movl 20(%esp), %edi
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ movl %edx, 8(%edi)
+ movl %esi, 12(%edi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 32(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %ecx
+ sbbl 8(%ebp), %edx
+ sbbl 12(%ebp), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB59_2
+# BB#1: # %nocarry
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ movl %edx, 8(%edi)
+ movl %esi, 12(%edi)
+.LBB59_2: # %carry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end59:
+ .size mcl_fp_add4L, .Lfunc_end59-mcl_fp_add4L
+
+ .globl mcl_fp_addNF4L
+ .align 16, 0x90
+ .type mcl_fp_addNF4L,@function
+mcl_fp_addNF4L: # @mcl_fp_addNF4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $8, %esp
+ movl 36(%esp), %edx
+ movl (%edx), %esi
+ movl 4(%edx), %ecx
+ movl 32(%esp), %edi
+ addl (%edi), %esi
+ adcl 4(%edi), %ecx
+ movl 12(%edx), %ebp
+ movl 8(%edx), %edx
+ adcl 8(%edi), %edx
+ adcl 12(%edi), %ebp
+ movl 40(%esp), %eax
+ movl %esi, %ebx
+ subl (%eax), %ebx
+ movl %ecx, %edi
+ sbbl 4(%eax), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl 40(%esp), %eax
+ sbbl 8(%eax), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ movl 40(%esp), %eax
+ sbbl 12(%eax), %edi
+ testl %edi, %edi
+ js .LBB60_2
+# BB#1:
+ movl %ebx, %esi
+.LBB60_2:
+ movl 28(%esp), %ebx
+ movl %esi, (%ebx)
+ js .LBB60_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB60_4:
+ movl %ecx, 4(%ebx)
+ js .LBB60_6
+# BB#5:
+ movl 4(%esp), %edx # 4-byte Reload
+.LBB60_6:
+ movl %edx, 8(%ebx)
+ js .LBB60_8
+# BB#7:
+ movl %edi, %ebp
+.LBB60_8:
+ movl %ebp, 12(%ebx)
+ addl $8, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end60:
+ .size mcl_fp_addNF4L, .Lfunc_end60-mcl_fp_addNF4L
+
+ .globl mcl_fp_sub4L
+ .align 16, 0x90
+ .type mcl_fp_sub4L,@function
+mcl_fp_sub4L: # @mcl_fp_sub4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 28(%esp), %edi
+ subl (%edi), %eax
+ sbbl 4(%edi), %ecx
+ movl 8(%esi), %edx
+ sbbl 8(%edi), %edx
+ movl 12(%esi), %esi
+ sbbl 12(%edi), %esi
+ movl 20(%esp), %edi
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ movl %edx, 8(%edi)
+ movl %esi, 12(%edi)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB61_2
+# BB#1: # %carry
+ movl 32(%esp), %ebx
+ addl (%ebx), %eax
+ movl 8(%ebx), %ebp
+ adcl 4(%ebx), %ecx
+ movl 12(%ebx), %ebx
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ adcl %edx, %ebp
+ movl %ebp, 8(%edi)
+ adcl %esi, %ebx
+ movl %ebx, 12(%edi)
+.LBB61_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end61:
+ .size mcl_fp_sub4L, .Lfunc_end61-mcl_fp_sub4L
+
+ .globl mcl_fp_subNF4L
+ .align 16, 0x90
+ .type mcl_fp_subNF4L,@function
+mcl_fp_subNF4L: # @mcl_fp_subNF4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $8, %esp
+ movl 32(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 36(%esp), %esi
+ subl (%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ sbbl 4(%esi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 12(%eax), %edi
+ movl 8(%eax), %edx
+ sbbl 8(%esi), %edx
+ sbbl 12(%esi), %edi
+ movl %edi, %esi
+ sarl $31, %esi
+ movl 40(%esp), %eax
+ movl 12(%eax), %ebp
+ andl %esi, %ebp
+ movl 8(%eax), %ecx
+ andl %esi, %ecx
+ movl 40(%esp), %eax
+ movl 4(%eax), %eax
+ andl %esi, %eax
+ movl 40(%esp), %ebx
+ andl (%ebx), %esi
+ addl (%esp), %esi # 4-byte Folded Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl 28(%esp), %ebx
+ movl %esi, (%ebx)
+ adcl %edx, %ecx
+ movl %eax, 4(%ebx)
+ movl %ecx, 8(%ebx)
+ adcl %edi, %ebp
+ movl %ebp, 12(%ebx)
+ addl $8, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end62:
+ .size mcl_fp_subNF4L, .Lfunc_end62-mcl_fp_subNF4L
+
+ .globl mcl_fpDbl_add4L
+ .align 16, 0x90
+ .type mcl_fpDbl_add4L,@function
+mcl_fpDbl_add4L: # @mcl_fpDbl_add4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $12, %esp
+ movl 40(%esp), %eax
+ movl (%eax), %edi
+ movl 4(%eax), %edx
+ movl 36(%esp), %esi
+ addl (%esi), %edi
+ adcl 4(%esi), %edx
+ movl 8(%eax), %ebx
+ adcl 8(%esi), %ebx
+ movl 12(%esi), %ebp
+ movl 32(%esp), %ecx
+ movl %edi, (%ecx)
+ movl 16(%esi), %edi
+ adcl 12(%eax), %ebp
+ adcl 16(%eax), %edi
+ movl %edx, 4(%ecx)
+ movl 28(%eax), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %ebx, 8(%ecx)
+ movl 24(%eax), %ebx
+ movl 20(%eax), %eax
+ movl %ebp, 12(%ecx)
+ movl 20(%esi), %edx
+ adcl %eax, %edx
+ movl 28(%esi), %ecx
+ movl 24(%esi), %ebp
+ adcl %ebx, %ebp
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 44(%esp), %eax
+ movl %edi, %esi
+ subl (%eax), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl %edx, %esi
+ sbbl 4(%eax), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ sbbl 8(%eax), %esi
+ sbbl 12(%eax), %ecx
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB63_2
+# BB#1:
+ movl %esi, %ebp
+.LBB63_2:
+ testb %bl, %bl
+ jne .LBB63_4
+# BB#3:
+ movl (%esp), %edi # 4-byte Reload
+.LBB63_4:
+ movl 32(%esp), %eax
+ movl %edi, 16(%eax)
+ jne .LBB63_6
+# BB#5:
+ movl 4(%esp), %edx # 4-byte Reload
+.LBB63_6:
+ movl %edx, 20(%eax)
+ movl %ebp, 24(%eax)
+ movl 8(%esp), %edx # 4-byte Reload
+ jne .LBB63_8
+# BB#7:
+ movl %ecx, %edx
+.LBB63_8:
+ movl %edx, 28(%eax)
+ addl $12, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end63:
+ .size mcl_fpDbl_add4L, .Lfunc_end63-mcl_fpDbl_add4L
+
+ .globl mcl_fpDbl_sub4L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub4L,@function
+mcl_fpDbl_sub4L: # @mcl_fpDbl_sub4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ pushl %eax
+ movl 28(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 32(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %esi
+ movl 8(%eax), %ebx
+ sbbl 8(%ebp), %ebx
+ movl 24(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 12(%eax), %edx
+ sbbl 12(%ebp), %edx
+ movl %esi, 4(%ecx)
+ movl 16(%eax), %edi
+ sbbl 16(%ebp), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%ebp), %esi
+ movl %edx, 12(%ecx)
+ movl 20(%eax), %ebx
+ sbbl %esi, %ebx
+ movl 24(%ebp), %edx
+ movl 24(%eax), %esi
+ sbbl %edx, %esi
+ movl 28(%ebp), %edx
+ movl 28(%eax), %eax
+ sbbl %edx, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl $0, %edx
+ sbbl $0, %edx
+ andl $1, %edx
+ movl 36(%esp), %ecx
+ movl (%ecx), %eax
+ jne .LBB64_1
+# BB#2:
+ xorl %ebp, %ebp
+ jmp .LBB64_3
+.LBB64_1:
+ movl 4(%ecx), %ebp
+.LBB64_3:
+ testb %dl, %dl
+ jne .LBB64_5
+# BB#4:
+ movl $0, %eax
+.LBB64_5:
+ jne .LBB64_6
+# BB#7:
+ movl $0, %edx
+ jmp .LBB64_8
+.LBB64_6:
+ movl 12(%ecx), %edx
+.LBB64_8:
+ jne .LBB64_9
+# BB#10:
+ xorl %ecx, %ecx
+ jmp .LBB64_11
+.LBB64_9:
+ movl 8(%ecx), %ecx
+.LBB64_11:
+ addl %edi, %eax
+ adcl %ebx, %ebp
+ movl 24(%esp), %edi
+ movl %eax, 16(%edi)
+ adcl %esi, %ecx
+ movl %ebp, 20(%edi)
+ movl %ecx, 24(%edi)
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%edi)
+ addl $4, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end64:
+ .size mcl_fpDbl_sub4L, .Lfunc_end64-mcl_fpDbl_sub4L
+
+ .globl mcl_fp_mulUnitPre5L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre5L,@function
+mcl_fp_mulUnitPre5L: # @mcl_fp_mulUnitPre5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %esi
+ movl 44(%esp), %ecx
+ movl %esi, %eax
+ mull 16(%ecx)
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 12(%ecx)
+ movl %edx, %ebx
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 8(%ecx)
+ movl %edx, %edi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 4(%ecx)
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull (%ecx)
+ movl 40(%esp), %ecx
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%ecx)
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%ecx)
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%ecx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 20(%ecx)
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end65:
+ .size mcl_fp_mulUnitPre5L, .Lfunc_end65-mcl_fp_mulUnitPre5L
+
+ .globl mcl_fpDbl_mulPre5L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre5L,@function
+mcl_fpDbl_mulPre5L: # @mcl_fpDbl_mulPre5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+ movl 88(%esp), %esi
+ movl (%esi), %ebp
+ movl 92(%esp), %eax
+ movl (%eax), %ebx
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull %ebx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 84(%esp), %edx
+ movl %eax, (%edx)
+ movl %esi, %eax
+ movl 4(%eax), %esi
+ movl 8(%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 12(%eax), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 16(%eax), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 4(%edi), %edi
+ movl %esi, %eax
+ mull %edi
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %edi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl (%esp), %esi # 4-byte Reload
+ movl %esi, %eax
+ mull %edi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %edx, %esi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %eax, %edi
+ movl %edx, %ebx
+ addl 36(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 84(%esp), %eax
+ movl %ebp, 4(%eax)
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 52(%esp), %edi # 4-byte Folded Reload
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl 88(%esp), %eax
+ movl %eax, %esi
+ movl 16(%esi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %edx, %eax
+ mull %ecx
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %esi, %edx
+ movl 8(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ movl (%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 4(%eax), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ mull %ecx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ecx
+ addl %edi, %eax
+ movl 84(%esp), %ecx
+ movl %eax, 8(%ecx)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl %ebx, %ecx
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl %ebp, %eax
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ addl %edx, %ecx
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl 12(%eax), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ movl 84(%esp), %edx
+ movl %eax, 12(%edx)
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 60(%esp) # 4-byte Folded Spill
+ movl 92(%esp), %eax
+ movl 16(%eax), %ebp
+ sbbl %ecx, %ecx
+ movl %ebp, %eax
+ movl 88(%esp), %esi
+ mull 16(%esi)
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 12(%esi)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 8(%esi)
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 4(%esi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull (%esi)
+ movl %eax, %ebp
+ andl $1, %ecx
+ addl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ addl %ebp, %edi
+ movl 84(%esp), %ebp
+ movl %edi, 16(%ebp)
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %eax, %edi
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %edx, %ebx
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %ebp, %edx
+ movl %ebx, 20(%edx)
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 24(%edx)
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 28(%edx)
+ movl %ecx, 32(%edx)
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%edx)
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end66:
+ .size mcl_fpDbl_mulPre5L, .Lfunc_end66-mcl_fpDbl_mulPre5L
+
+ .globl mcl_fpDbl_sqrPre5L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre5L,@function
+mcl_fpDbl_sqrPre5L: # @mcl_fpDbl_sqrPre5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %ebx
+ movl 16(%ebx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl (%ebx), %edi
+ movl 4(%ebx), %ecx
+ mull %ecx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 12(%ebx), %esi
+ movl %esi, %eax
+ mull %ecx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 8(%ebx), %ebx
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %edi
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edi
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull %ecx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %edi, %eax
+ mull %edi
+ movl 80(%esp), %edi
+ movl %eax, (%edi)
+ addl %ecx, %edx
+ adcl %esi, %ebp
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %ecx, %edx
+ movl 80(%esp), %ecx
+ movl %edx, 4(%ecx)
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %esi, %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl (%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl 44(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx
+ movl 12(%ecx), %edi
+ movl %edi, %eax
+ mull %ebx
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl (%eax), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 4(%eax), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ mull %ebx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull %ebx
+ movl %eax, 44(%esp) # 4-byte Spill
+ addl %ebp, %ecx
+ movl 80(%esp), %eax
+ movl %ecx, 8(%eax)
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %esi, %eax
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 32(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl %edx, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 16(%eax), %ebx
+ movl %ebx, %eax
+ mull %edi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 28(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %edi, %eax
+ mull %edi
+ movl %eax, %edi
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 80(%esp), %eax
+ movl %ecx, 12(%eax)
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ sbbl %ecx, %ecx
+ movl %ebx, %eax
+ movl 84(%esp), %edx
+ mull 12(%edx)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl 84(%esp), %edx
+ mull 4(%edx)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl 84(%esp), %edx
+ mull (%edx)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %eax, 20(%esp) # 4-byte Spill
+ andl $1, %ecx
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ addl (%esp), %ebp # 4-byte Folded Reload
+ movl 80(%esp), %ebx
+ movl %ebp, 16(%ebx)
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %eax, %ebp
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 12(%esp), %esi # 4-byte Folded Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 20(%ebx)
+ adcl %edx, %ebp
+ movl %edi, 24(%ebx)
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 28(%ebx)
+ movl %ecx, 32(%ebx)
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end67:
+ .size mcl_fpDbl_sqrPre5L, .Lfunc_end67-mcl_fpDbl_sqrPre5L
+
+ .globl mcl_fp_mont5L
+ .align 16, 0x90
+ .type mcl_fp_mont5L,@function
+mcl_fp_mont5L: # @mcl_fp_mont5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $112, %esp
+ movl 136(%esp), %ebx
+ movl (%ebx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 140(%esp), %ecx
+ movl (%ecx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 144(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ imull %edx, %ecx
+ movl (%esi), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 16(%esi), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 4(%esi), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 4(%ebx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 16(%ebx), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 12(%ebx), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 8(%ebx), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl 32(%esp), %ecx # 4-byte Reload
+ mull %ecx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ecx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, %ebx
+ movl %eax, %edi
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 60(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ addl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 16(%esp), %ebp # 4-byte Reload
+ addl 36(%esp), %ebp # 4-byte Folded Reload
+ adcl %edi, 40(%esp) # 4-byte Folded Spill
+ adcl %ebx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl 4(%eax), %edi
+ movl %edi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ addl %ebp, %edx
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %ebx, %ebp
+ imull 96(%esp), %ebp # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, %edx
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 36(%esp), %eax # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl 8(%eax), %ebx
+ movl %ebx, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %eax, %esi
+ addl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %ebp, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl %edi, 48(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %esi, %ebp
+ imull 96(%esp), %ebp # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %edi, %edx
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl 12(%eax), %edi
+ movl %edi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ addl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %ebp, %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl %esi, 48(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %ebx, %ebp
+ imull 96(%esp), %ebp # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, %edx
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl 16(%eax), %ebx
+ movl %ebx, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ addl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %ebx # 4-byte Reload
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %esi, %eax
+ adcl $0, %eax
+ movl 84(%esp), %esi # 4-byte Reload
+ addl %ebp, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl %edi, 80(%esp) # 4-byte Folded Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 76(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ movl 96(%esp), %ecx # 4-byte Reload
+ imull %esi, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ andl $1, %ebx
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ mull 88(%esp) # 4-byte Folded Reload
+ addl 48(%esp), %eax # 4-byte Folded Reload
+ adcl %ecx, %edx
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 84(%esp), %ecx # 4-byte Folded Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ adcl 72(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl %eax, %ecx
+ subl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ sbbl 88(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ sbbl 104(%esp), %ecx # 4-byte Folded Reload
+ sbbl 108(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ sbbl 92(%esp), %ebp # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB68_2
+# BB#1:
+ movl 88(%esp), %edx # 4-byte Reload
+.LBB68_2:
+ testb %bl, %bl
+ jne .LBB68_4
+# BB#3:
+ movl 100(%esp), %eax # 4-byte Reload
+.LBB68_4:
+ movl 132(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %edx, 4(%ebx)
+ jne .LBB68_6
+# BB#5:
+ movl %ecx, %esi
+.LBB68_6:
+ movl %esi, 8(%ebx)
+ movl 96(%esp), %eax # 4-byte Reload
+ jne .LBB68_8
+# BB#7:
+ movl 108(%esp), %eax # 4-byte Reload
+.LBB68_8:
+ movl %eax, 12(%ebx)
+ jne .LBB68_10
+# BB#9:
+ movl %ebp, %edi
+.LBB68_10:
+ movl %edi, 16(%ebx)
+ addl $112, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end68:
+ .size mcl_fp_mont5L, .Lfunc_end68-mcl_fp_mont5L
+
+ .globl mcl_fp_montNF5L
+ .align 16, 0x90
+ .type mcl_fp_montNF5L,@function
+mcl_fp_montNF5L: # @mcl_fp_montNF5L
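+# Montgomery multiplication on 5-limb (5 x 32-bit) operands; in this "NF" form the final
+# modular correction below is chosen by the sign of a trial subtraction of the modulus
+# rather than by a carry test.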
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $104, %esp
+ movl 128(%esp), %ebx
+ movl (%ebx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx
+ movl (%ecx), %ecx
+ mull %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 136(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, %edi
+ imull %edx, %edi
+ movl (%esi), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 16(%esi), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 4(%esi), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 4(%ebx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 16(%ebx), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 12(%ebx), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 8(%ebx), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, %edi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, %ebp
+ movl %eax, %ebx
+ movl 76(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, %ecx
+ movl %eax, %esi
+ addl 68(%esp), %esi # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 8(%esp), %edx # 4-byte Reload
+ addl 48(%esp), %edx # 4-byte Folded Reload
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %eax
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 4(%eax), %ebx
+ movl %ebx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ addl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %esi, 28(%esp) # 4-byte Folded Spill
+ adcl %ecx, 32(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl %ebp, %ecx
+ adcl %edi, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 28(%esp), %esi # 4-byte Reload
+ movl %esi, %edi
+ imull 84(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %edi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ addl %esi, %eax
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ adcl %ecx, %ebp
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edx, %ebx
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %ecx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ addl %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ imull 84(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ addl 32(%esp), %eax # 4-byte Folded Reload
+ adcl %ebp, %ecx
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl %edi, %ebx
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl %esi, %edi
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edx, %ecx
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 12(%eax), %edi
+ movl %edi, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl %ecx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ imull 84(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ addl 20(%esp), %eax # 4-byte Folded Reload
+ movl %edi, %edx
+ adcl %ebp, %edx
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl %ebx, %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl %esi, %ecx
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl 16(%eax), %ecx
+ movl %ecx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ addl 76(%esp), %edx # 4-byte Folded Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl 84(%esp), %ecx # 4-byte Reload
+ imull %eax, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ mull 88(%esp) # 4-byte Folded Reload
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 52(%esp), %eax # 4-byte Folded Reload
+ adcl %edx, %ecx
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %edi # 4-byte Folded Reload
+ movl %eax, %ebx
+ subl 100(%esp), %ebx # 4-byte Folded Reload
+ movl %ecx, %edx
+ sbbl 88(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ sbbl 92(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ sbbl 96(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %edi, %edx
+ movl %edi, %esi
+ sbbl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %edx
+ sarl $31, %edx
+ testl %edx, %edx
+ js .LBB69_2
+# BB#1:
+ movl %ebx, %eax
+.LBB69_2:
+ movl 124(%esp), %edx
+ movl %eax, (%edx)
+ js .LBB69_4
+# BB#3:
+ movl 88(%esp), %ecx # 4-byte Reload
+.LBB69_4:
+ movl %ecx, 4(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB69_6
+# BB#5:
+ movl 92(%esp), %eax # 4-byte Reload
+.LBB69_6:
+ movl %eax, 8(%edx)
+ js .LBB69_8
+# BB#7:
+ movl 100(%esp), %ebp # 4-byte Reload
+.LBB69_8:
+ movl %ebp, 12(%edx)
+ js .LBB69_10
+# BB#9:
+ movl %edi, %esi
+.LBB69_10:
+ movl %esi, 16(%edx)
+ addl $104, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end69:
+ .size mcl_fp_montNF5L, .Lfunc_end69-mcl_fp_montNF5L
+
+ .globl mcl_fp_montRed5L
+ .align 16, 0x90
+ .type mcl_fp_montRed5L,@function
+mcl_fp_montRed5L: # @mcl_fp_montRed5L
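+# Montgomery reduction: folds a 10-limb input down to a 5-limb result, using the constant
+# read from offset -4 of the modulus argument to form each quotient word, then performs one
+# final conditional subtraction of the modulus.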
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 116(%esp), %eax
+ movl -4(%eax), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl (%eax), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 112(%esp), %esi
+ movl (%esi), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ imull %edx, %ecx
+ movl 16(%eax), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 12(%eax), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 8(%eax), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 4(%eax), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, %ebp
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ addl %edi, %ebx
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 80(%esp), %eax # 4-byte Folded Reload
+ adcl 4(%esi), %ebx
+ adcl 8(%esi), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 12(%esi), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 16(%esi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 20(%esi), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ movl 32(%esi), %ecx
+ movl 28(%esi), %edx
+ movl 24(%esi), %esi
+ adcl $0, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebx, %ecx
+ imull 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %eax, %edi
+ addl %esi, %edx
+ movl %edx, %ebp
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ebx, %edi
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ebp, %esi
+ imull 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %ebx
+ movl %esi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl %ebx, %ebp
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 16(%esp), %eax # 4-byte Folded Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ebp, %edi
+ imull 76(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ addl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ebp, 16(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ imull %ebx, %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ mull 60(%esp) # 4-byte Folded Reload
+ addl 24(%esp), %eax # 4-byte Folded Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl %ebx, %esi
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ movl %eax, %esi
+ subl 84(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sbbl 60(%esp), %esi # 4-byte Folded Reload
+ sbbl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ sbbl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ movl %edi, %ecx
+ sbbl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ movl %ebx, 80(%esp) # 4-byte Spill
+ jne .LBB70_2
+# BB#1:
+ movl %esi, %edx
+.LBB70_2:
+ movl 80(%esp), %ebx # 4-byte Reload
+ testb %bl, %bl
+ jne .LBB70_4
+# BB#3:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB70_4:
+ movl 108(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edx, 4(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ jne .LBB70_6
+# BB#5:
+ movl %ebp, %eax
+.LBB70_6:
+ movl %eax, 8(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ jne .LBB70_8
+# BB#7:
+ movl 68(%esp), %eax # 4-byte Reload
+.LBB70_8:
+ movl %eax, 12(%ecx)
+ jne .LBB70_10
+# BB#9:
+ movl 84(%esp), %edi # 4-byte Reload
+.LBB70_10:
+ movl %edi, 16(%ecx)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end70:
+ .size mcl_fp_montRed5L, .Lfunc_end70-mcl_fp_montRed5L
+
+ .globl mcl_fp_addPre5L
+ .align 16, 0x90
+ .type mcl_fp_addPre5L,@function
+mcl_fp_addPre5L: # @mcl_fp_addPre5L
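+# 5-limb addition without modular reduction: stores the 5-word sum and returns the carry-out in %eax.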
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 24(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %edi
+ adcl 8(%esi), %edi
+ movl 12(%esi), %ebx
+ movl 16(%esi), %esi
+ adcl 12(%eax), %ebx
+ movl 16(%eax), %eax
+ movl 20(%esp), %ebp
+ movl %ecx, (%ebp)
+ movl %edx, 4(%ebp)
+ movl %edi, 8(%ebp)
+ movl %ebx, 12(%ebp)
+ adcl %esi, %eax
+ movl %eax, 16(%ebp)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end71:
+ .size mcl_fp_addPre5L, .Lfunc_end71-mcl_fp_addPre5L
+
+ .globl mcl_fp_subPre5L
+ .align 16, 0x90
+ .type mcl_fp_subPre5L,@function
+mcl_fp_subPre5L: # @mcl_fp_subPre5L
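+# 5-limb subtraction without modular reduction: stores the 5-word difference and returns the borrow in %eax.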
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %edx
+ xorl %eax, %eax
+ movl 20(%esp), %esi
+ subl (%esi), %edx
+ movl 12(%esp), %edi
+ movl %edx, (%edi)
+ movl 4(%ecx), %edx
+ sbbl 4(%esi), %edx
+ movl %edx, 4(%edi)
+ movl 8(%ecx), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, 8(%edi)
+ movl 12(%ecx), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%edi)
+ movl 16(%esi), %edx
+ movl 16(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 16(%edi)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end72:
+ .size mcl_fp_subPre5L, .Lfunc_end72-mcl_fp_subPre5L
+
+ .globl mcl_fp_shr1_5L
+ .align 16, 0x90
+ .type mcl_fp_shr1_5L,@function
+mcl_fp_shr1_5L: # @mcl_fp_shr1_5L
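+# Logical right shift of a 5-limb value by one bit, propagating bits across limbs with shrdl.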
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %eax
+ movl 16(%eax), %ecx
+ movl 12(%eax), %edx
+ movl 8(%eax), %esi
+ movl (%eax), %edi
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %edi
+ movl 16(%esp), %ebx
+ movl %edi, (%ebx)
+ shrdl $1, %esi, %eax
+ movl %eax, 4(%ebx)
+ shrdl $1, %edx, %esi
+ movl %esi, 8(%ebx)
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%ebx)
+ shrl %ecx
+ movl %ecx, 16(%ebx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end73:
+ .size mcl_fp_shr1_5L, .Lfunc_end73-mcl_fp_shr1_5L
+
+ .globl mcl_fp_add5L
+ .align 16, 0x90
+ .type mcl_fp_add5L,@function
+mcl_fp_add5L: # @mcl_fp_add5L
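+# Modular addition: adds two 5-limb values, then subtracts the modulus and keeps the reduced
+# sum only when no borrow occurs (the "nocarry" path below).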
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %ebx
+ movl (%ebx), %eax
+ movl 4(%ebx), %ecx
+ movl 24(%esp), %edi
+ addl (%edi), %eax
+ adcl 4(%edi), %ecx
+ movl 8(%ebx), %edx
+ adcl 8(%edi), %edx
+ movl 12(%edi), %esi
+ movl 16(%edi), %edi
+ adcl 12(%ebx), %esi
+ adcl 16(%ebx), %edi
+ movl 20(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %ecx, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl %esi, 12(%ebx)
+ movl %edi, 16(%ebx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 32(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %ecx
+ sbbl 8(%ebp), %edx
+ sbbl 12(%ebp), %esi
+ sbbl 16(%ebp), %edi
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB74_2
+# BB#1: # %nocarry
+ movl 20(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %ecx, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl %esi, 12(%ebx)
+ movl %edi, 16(%ebx)
+.LBB74_2: # %carry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end74:
+ .size mcl_fp_add5L, .Lfunc_end74-mcl_fp_add5L
+
+ .globl mcl_fp_addNF5L
+ .align 16, 0x90
+ .type mcl_fp_addNF5L,@function
+mcl_fp_addNF5L: # @mcl_fp_addNF5L
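+# Modular addition (NF variant): adds two 5-limb values, subtracts the modulus, and selects
+# between the raw and reduced results by the sign of the high word.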
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %esi
+ movl (%esi), %ebx
+ movl 4(%esi), %eax
+ movl 44(%esp), %edi
+ addl (%edi), %ebx
+ adcl 4(%edi), %eax
+ movl 16(%esi), %ecx
+ movl 12(%esi), %edx
+ movl 8(%esi), %ebp
+ adcl 8(%edi), %ebp
+ adcl 12(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 16(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edi
+ movl %ebx, %esi
+ subl (%edi), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl %eax, %esi
+ sbbl 4(%edi), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ sbbl 8(%edi), %esi
+ sbbl 12(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ sbbl 16(%edi), %edx
+ movl %edx, %edi
+ sarl $31, %edi
+ testl %edi, %edi
+ js .LBB75_2
+# BB#1:
+ movl (%esp), %ebx # 4-byte Reload
+.LBB75_2:
+ movl 40(%esp), %edi
+ movl %ebx, (%edi)
+ js .LBB75_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB75_4:
+ movl %eax, 4(%edi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ js .LBB75_6
+# BB#5:
+ movl %esi, %ebp
+.LBB75_6:
+ movl %ebp, 8(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ js .LBB75_8
+# BB#7:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB75_8:
+ movl %ecx, 12(%edi)
+ js .LBB75_10
+# BB#9:
+ movl %edx, %eax
+.LBB75_10:
+ movl %eax, 16(%edi)
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end75:
+ .size mcl_fp_addNF5L, .Lfunc_end75-mcl_fp_addNF5L
+
+ .globl mcl_fp_sub5L
+ .align 16, 0x90
+ .type mcl_fp_sub5L,@function
+mcl_fp_sub5L: # @mcl_fp_sub5L
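+# Modular subtraction: subtracts two 5-limb values and, if a borrow occurred, adds the modulus
+# back in (the "carry" path below).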
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ xorl %ebx, %ebx
+ movl 28(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %ecx
+ movl 8(%edi), %edx
+ sbbl 8(%ebp), %edx
+ movl 12(%edi), %esi
+ sbbl 12(%ebp), %esi
+ movl 16(%edi), %edi
+ sbbl 16(%ebp), %edi
+ movl 20(%esp), %ebp
+ movl %eax, (%ebp)
+ movl %ecx, 4(%ebp)
+ movl %edx, 8(%ebp)
+ movl %esi, 12(%ebp)
+ movl %edi, 16(%ebp)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB76_2
+# BB#1: # %carry
+ movl 32(%esp), %ebx
+ addl (%ebx), %eax
+ movl %eax, (%ebp)
+ adcl 4(%ebx), %ecx
+ movl %ecx, 4(%ebp)
+ adcl 8(%ebx), %edx
+ movl %edx, 8(%ebp)
+ movl 12(%ebx), %eax
+ adcl %esi, %eax
+ movl %eax, 12(%ebp)
+ movl 16(%ebx), %eax
+ adcl %edi, %eax
+ movl %eax, 16(%ebp)
+.LBB76_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end76:
+ .size mcl_fp_sub5L, .Lfunc_end76-mcl_fp_sub5L
+
+ .globl mcl_fp_subNF5L
+ .align 16, 0x90
+ .type mcl_fp_subNF5L,@function
+mcl_fp_subNF5L: # @mcl_fp_subNF5L
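+# Modular subtraction (NF variant): subtracts 5-limb values and adds back the modulus masked
+# by the sign of the result, avoiding a branch.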
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 40(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 44(%esp), %ebx
+ subl (%ebx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ sbbl 4(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 16(%edi), %esi
+ movl 12(%edi), %eax
+ movl 8(%edi), %ecx
+ sbbl 8(%ebx), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ sbbl 12(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %esi
+ movl %esi, %ebx
+ sarl $31, %ebx
+ movl %ebx, %ebp
+ shldl $1, %esi, %ebp
+ movl 48(%esp), %edi
+ movl 4(%edi), %ecx
+ andl %ebp, %ecx
+ andl (%edi), %ebp
+ movl 16(%edi), %edx
+ andl %ebx, %edx
+ movl 12(%edi), %eax
+ andl %ebx, %eax
+ roll %ebx
+ andl 8(%edi), %ebx
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl 36(%esp), %edi
+ movl %ebp, (%edi)
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl %ecx, 4(%edi)
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 8(%edi)
+ movl %eax, 12(%edi)
+ adcl %esi, %edx
+ movl %edx, 16(%edi)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end77:
+ .size mcl_fp_subNF5L, .Lfunc_end77-mcl_fp_subNF5L
+
+ .globl mcl_fpDbl_add5L
+ .align 16, 0x90
+ .type mcl_fpDbl_add5L,@function
+mcl_fpDbl_add5L: # @mcl_fpDbl_add5L
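+# Double-width addition: adds two 10-limb values; the low 5 limbs are stored as-is and the
+# high 5 limbs receive one conditional subtraction of the modulus.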
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 56(%esp), %edx
+ movl 52(%esp), %ecx
+ movl 12(%ecx), %ebx
+ movl 16(%ecx), %ebp
+ movl 8(%edx), %esi
+ movl (%edx), %edi
+ addl (%ecx), %edi
+ movl 48(%esp), %eax
+ movl %edi, (%eax)
+ movl 4(%edx), %edi
+ adcl 4(%ecx), %edi
+ adcl 8(%ecx), %esi
+ adcl 12(%edx), %ebx
+ adcl 16(%edx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl %edi, 4(%eax)
+ movl 28(%edx), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl %esi, 8(%eax)
+ movl 20(%edx), %esi
+ movl %ebx, 12(%eax)
+ movl 20(%ecx), %ebp
+ adcl %esi, %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 24(%edx), %esi
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%eax)
+ movl 24(%ecx), %ebx
+ adcl %esi, %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 28(%ecx), %edi
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl 32(%ecx), %esi
+ adcl %eax, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 36(%edx), %eax
+ movl 36(%ecx), %edx
+ adcl %eax, %edx
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %ebp, %ecx
+ movl 60(%esp), %ebp
+ subl (%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ sbbl 4(%ebp), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ sbbl 8(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl %esi, %ebx
+ movl %edx, %esi
+ sbbl 12(%ebp), %ebx
+ sbbl 16(%ebp), %edx
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB78_2
+# BB#1:
+ movl %edx, %esi
+.LBB78_2:
+ testb %al, %al
+ movl 12(%esp), %ebp # 4-byte Reload
+ jne .LBB78_4
+# BB#3:
+ movl (%esp), %ebp # 4-byte Reload
+.LBB78_4:
+ movl 48(%esp), %eax
+ movl %ebp, 20(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl 20(%esp), %edx # 4-byte Reload
+ movl 16(%esp), %edi # 4-byte Reload
+ jne .LBB78_6
+# BB#5:
+ movl 4(%esp), %edi # 4-byte Reload
+.LBB78_6:
+ movl %edi, 24(%eax)
+ jne .LBB78_8
+# BB#7:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB78_8:
+ movl %edx, 28(%eax)
+ jne .LBB78_10
+# BB#9:
+ movl %ebx, %ecx
+.LBB78_10:
+ movl %ecx, 32(%eax)
+ movl %esi, 36(%eax)
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end78:
+ .size mcl_fpDbl_add5L, .Lfunc_end78-mcl_fpDbl_add5L
+
+ .globl mcl_fpDbl_sub5L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub5L,@function
+mcl_fpDbl_sub5L: # @mcl_fpDbl_sub5L
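+# Double-width subtraction: subtracts two 10-limb values; when the high half borrows, the
+# modulus is added back into the upper 5 limbs.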
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 40(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edi
+ movl 44(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%eax), %ebx
+ sbbl 8(%edx), %ebx
+ movl 36(%esp), %ecx
+ movl %esi, (%ecx)
+ movl 12(%eax), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ecx)
+ movl 16(%eax), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ sbbl %ebx, %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 24(%edx), %esi
+ movl %edi, 16(%ecx)
+ movl 24(%eax), %ebp
+ sbbl %esi, %ebp
+ movl 28(%edx), %esi
+ movl 28(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl 32(%edx), %esi
+ movl 32(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 36(%edx), %edx
+ movl 36(%eax), %eax
+ sbbl %edx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl $0, %edx
+ sbbl $0, %edx
+ andl $1, %edx
+ movl 48(%esp), %ebx
+ jne .LBB79_1
+# BB#2:
+ xorl %eax, %eax
+ jmp .LBB79_3
+.LBB79_1:
+ movl 16(%ebx), %eax
+.LBB79_3:
+ testb %dl, %dl
+ jne .LBB79_4
+# BB#5:
+ movl $0, %edx
+ movl $0, %esi
+ jmp .LBB79_6
+.LBB79_4:
+ movl (%ebx), %esi
+ movl 4(%ebx), %edx
+.LBB79_6:
+ jne .LBB79_7
+# BB#8:
+ movl $0, %edi
+ jmp .LBB79_9
+.LBB79_7:
+ movl 12(%ebx), %edi
+.LBB79_9:
+ jne .LBB79_10
+# BB#11:
+ xorl %ebx, %ebx
+ jmp .LBB79_12
+.LBB79_10:
+ movl 8(%ebx), %ebx
+.LBB79_12:
+ addl 4(%esp), %esi # 4-byte Folded Reload
+ adcl %ebp, %edx
+ movl %esi, 20(%ecx)
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl %edx, 24(%ecx)
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 28(%ecx)
+ movl %edi, 32(%ecx)
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end79:
+ .size mcl_fpDbl_sub5L, .Lfunc_end79-mcl_fpDbl_sub5L
+
+ .globl mcl_fp_mulUnitPre6L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre6L,@function
+mcl_fp_mulUnitPre6L: # @mcl_fp_mulUnitPre6L
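+# Multiplies a 6-limb value by a single 32-bit word, producing a 7-limb result with no reduction.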
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 56(%esp), %ebx
+ movl 52(%esp), %edi
+ movl %ebx, %eax
+ mull 20(%edi)
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 16(%edi)
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 12(%edi)
+ movl %edx, %esi
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 8(%edi)
+ movl %edx, %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 4(%edi)
+ movl %edx, %ecx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull (%edi)
+ movl 48(%esp), %edi
+ movl %eax, (%edi)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%edi)
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%edi)
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%edi)
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%edi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 24(%edi)
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end80:
+ .size mcl_fp_mulUnitPre6L, .Lfunc_end80-mcl_fp_mulUnitPre6L
+
+ .globl mcl_fpDbl_mulPre6L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre6L,@function
+mcl_fpDbl_mulPre6L: # @mcl_fpDbl_mulPre6L
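+# Schoolbook multiplication of two 6-limb values, producing the full 12-limb product with no reduction.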
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $84, %esp
+ movl 108(%esp), %esi
+ movl (%esi), %ebp
+ movl 112(%esp), %eax
+ movl (%eax), %edi
+ movl %ebp, %eax
+ mull %edi
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 104(%esp), %edx
+ movl %eax, (%edx)
+ movl 4(%esi), %ebx
+ movl 8(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 20(%esi), %ecx
+ movl 112(%esp), %eax
+ movl 4(%eax), %esi
+ movl %ebx, %eax
+ mull %esi
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %esi
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edi
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, %eax
+ mull %esi
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, %ecx
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, %esi
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edi
+ movl %eax, %ebx
+ movl %edx, %edi
+ addl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 32(%esp), %ebp # 4-byte Folded Reload
+ movl 104(%esp), %eax
+ movl %ebp, 4(%eax)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, %eax
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %edx, %ecx
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ sbbl %edx, %edx
+ andl $1, %edx
+ addl 60(%esp), %ebx # 4-byte Folded Reload
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp
+ movl 20(%ebp), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %edx, %eax
+ mull %ecx
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 16(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 12(%ebp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 8(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl (%ebp), %esi
+ movl 4(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ mull %ecx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ movl 104(%esp), %ecx
+ movl %eax, 8(%ecx)
+ adcl %edi, %ebp
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 72(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 76(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl %eax, 80(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+ movl 112(%esp), %eax
+ movl 12(%eax), %ecx
+ movl 20(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edx, 32(%esp) # 4-byte Spill
+ andl $1, %edi
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, %ecx
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ movl 104(%esp), %ebx
+ movl %ebp, 12(%ebx)
+ movl %esi, %ebx
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, %esi
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ sbbl %edx, %edx
+ andl $1, %edx
+ addl 16(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 72(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 108(%esp), %eax
+ movl %eax, %ecx
+ movl 20(%ecx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 16(%ecx), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 12(%ecx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 8(%ecx), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl (%ecx), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 4(%ecx), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ movl 112(%esp), %esi
+ movl 16(%esi), %ecx
+ mull %ecx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ecx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl 72(%esp), %eax # 4-byte Folded Reload
+ movl 104(%esp), %ecx
+ movl %eax, 16(%ecx)
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 64(%esp) # 4-byte Folded Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 20(%eax), %ecx
+ sbbl %esi, %esi
+ movl 20(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 4(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edx, 56(%esp) # 4-byte Spill
+ andl $1, %esi
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ movl 104(%esp), %edx
+ movl %ebp, 20(%edx)
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edx
+ movl %ecx, %ebp
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 60(%esp), %ebx # 4-byte Folded Reload
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl 104(%esp), %ecx
+ movl %ebx, 24(%ecx)
+ movl %edx, %ebx
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 28(%ecx)
+ movl %ebp, %edx
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 32(%ecx)
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 36(%ecx)
+ movl %esi, 40(%ecx)
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ addl $84, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end81:
+ .size mcl_fpDbl_mulPre6L, .Lfunc_end81-mcl_fpDbl_mulPre6L
+
+ .globl mcl_fpDbl_sqrPre6L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre6L,@function
+mcl_fpDbl_sqrPre6L: # @mcl_fpDbl_sqrPre6L
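+# Squaring of a 6-limb value, producing the full 12-limb result with no reduction.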
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 124(%esp), %esi
+ movl 20(%esi), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl (%esi), %ebp
+ movl 4(%esi), %ebx
+ mull %ebx
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 16(%esi), %ecx
+ movl %ecx, %eax
+ mull %ebx
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 12(%esi), %edi
+ movl %edi, %eax
+ mull %ebx
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl 8(%eax), %esi
+ movl %esi, %eax
+ mull %ebx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ebp
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebp
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull %ebx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebp
+ movl %edx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull %ebp
+ movl 120(%esp), %ebx
+ movl %eax, (%ebx)
+ addl %edi, %edx
+ adcl %esi, %ecx
+ movl %ecx, %ebx
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ addl %edi, %edx
+ movl 120(%esp), %edi
+ movl %edx, 4(%edi)
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %esi, %edx
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edi
+ adcl 72(%esp), %ebp # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %ebx, %esi
+ addl 32(%esp), %esi # 4-byte Folded Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 76(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 96(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %edi
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 124(%esp), %ebx
+ movl 20(%ebx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 8(%ebx), %ebp
+ mull %ebp
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 16(%ebx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ mull %ebp
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 12(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ mull %ebp
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl (%ebx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 4(%ebx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ mull %ebp
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull %ebp
+ movl %eax, %ebp
+ addl %esi, %ebx
+ movl 120(%esp), %eax
+ movl %ebx, 8(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ adcl 92(%esp), %ebp # 4-byte Folded Reload
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl 80(%esp), %ebx # 4-byte Reload
+ adcl %edi, %ebx
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl %edx, %eax
+ movl %eax, %ebp
+ adcl 76(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 80(%esp) # 4-byte Spill
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ movl 36(%esp), %edi # 4-byte Reload
+ mull %edi
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 56(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %edi, %eax
+ mull %edi
+ movl %eax, %edi
+ movl %edx, 36(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl 120(%esp), %eax
+ movl %esi, 12(%eax)
+ adcl 96(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 72(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 88(%esp) # 4-byte Folded Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 92(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx
+ movl (%ecx), %ebx
+ movl 4(%ecx), %edi
+ movl 20(%ecx), %ebp
+ movl %edi, %eax
+ mull %ebp
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebp
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 16(%ecx), %esi
+ movl %edi, %eax
+ mull %esi
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %esi
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ addl %eax, 72(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 96(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl %eax, 88(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 92(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl 12(%eax), %edi
+ movl 8(%eax), %ebx
+ movl %edi, %eax
+ mull %ebp
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %esi
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebp
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %esi
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull %esi
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ebp
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %esi
+ movl %eax, %ebx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ movl 120(%esp), %ebp
+ movl %eax, 16(%ebp)
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ adcl %ecx, %edi
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ adcl 92(%esp), %ebx # 4-byte Folded Reload
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl 28(%esp), %eax # 4-byte Folded Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ adcl (%esp), %edx # 4-byte Folded Reload
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ addl 48(%esp), %eax # 4-byte Folded Reload
+ movl 120(%esp), %ebp
+ movl %eax, 20(%ebp)
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, %eax
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ sbbl %edx, %edx
+ andl $1, %edx
+ addl 64(%esp), %edi # 4-byte Folded Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl 120(%esp), %ebp
+ movl %edi, 24(%ebp)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ecx, 28(%ebp)
+ movl %eax, %edi
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 32(%ebp)
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 36(%ebp)
+ movl %esi, 40(%ebp)
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%ebp)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end82:
+ .size mcl_fpDbl_sqrPre6L, .Lfunc_end82-mcl_fpDbl_sqrPre6L
+
+ .globl mcl_fp_mont6L
+ .align 16, 0x90
+ .type mcl_fp_mont6L,@function
+mcl_fp_mont6L: # @mcl_fp_mont6L
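+# Montgomery multiplication for 6-limb (192-bit) elements on 32-bit x86:
+# computes z = x*y*2^-192 mod p with z at 152(%esp), x at 156(%esp),
+# y at 160(%esp) and the modulus table p at 164(%esp). The word at -4(p)
+# (mcl keeps -p^-1 mod 2^32 there) yields the reduction factor for each of
+# the six interleaved multiply/reduce rounds below (a fully unrolled
+# CIOS-style loop), followed by one conditional subtraction of p.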
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $132, %esp
+ movl 156(%esp), %edi
+ movl (%edi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 160(%esp), %ecx
+ movl (%ecx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 164(%esp), %edx
+ movl -4(%edx), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull %ecx, %ebp
+ movl (%edx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 16(%edx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 12(%edx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 8(%edx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 4(%edx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 4(%edi), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl 20(%eax), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 16(%eax), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 12(%eax), %ebx
+ movl %ebx, 80(%esp) # 4-byte Spill
+ movl 8(%eax), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %esi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ mull %ebp
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebp
+ movl %edx, %ebx
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %ebp, %ecx
+ movl %edx, %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %eax, %edi
+ addl 64(%esp), %edi # 4-byte Folded Reload
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 20(%esp), %ebx # 4-byte Reload
+ addl 68(%esp), %ebx # 4-byte Folded Reload
+ adcl %edi, 40(%esp) # 4-byte Folded Spill
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
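+# round 1: accumulate x * y[1], then one reduction step with
+# m = t[0] * (-p^-1 mod 2^32) and t = (t + m*p) / 2^32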
+ movl 160(%esp), %eax
+ movl 4(%eax), %ecx
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ addl %esi, %edx
+ movl %edx, %esi
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl %edi, %ebx
+ movl %ebx, %edi
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 64(%esp), %ebx # 4-byte Reload
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %ebx, %esi
+ imull 112(%esp), %esi # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %ecx
+ movl %esi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %eax, %edi
+ movl %edx, %ebx
+ addl %ecx, %ebx
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 64(%esp), %edi # 4-byte Folded Reload
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
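+# round 2: multiply-accumulate x * y[2] and reduce one word, as above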
+ movl 160(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ addl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %edi
+ addl %ebx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl %ebp, 44(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 40(%esp) # 4-byte Folded Spill
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ movl %eax, %esi
+ imull 112(%esp), %esi # 4-byte Folded Reload
+ andl $1, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %edi
+ movl %esi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %eax, %esi
+ movl %edx, %ebx
+ addl %edi, %ebx
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ecx, %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 32(%esp), %esi # 4-byte Folded Reload
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
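+# round 3: x * y[3]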
+ movl 160(%esp), %eax
+ movl 12(%eax), %edi
+ movl %edi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ addl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %ebx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl %ebp, 52(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 48(%esp) # 4-byte Folded Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ movl %eax, %esi
+ imull 112(%esp), %esi # 4-byte Folded Reload
+ andl $1, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %edi
+ movl %esi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %eax, %esi
+ movl %edx, %ebx
+ addl %edi, %ebx
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ecx, %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 32(%esp), %esi # 4-byte Folded Reload
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
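+# round 4: x * y[4]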
+ movl 160(%esp), %eax
+ movl 16(%eax), %edi
+ movl %edi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ addl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %ebx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl %ebp, 48(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 44(%esp) # 4-byte Folded Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ movl %eax, %esi
+ imull 112(%esp), %esi # 4-byte Folded Reload
+ andl $1, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %edi
+ movl %esi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %eax, %ecx
+ movl %edx, %ebx
+ addl %edi, %ebx
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
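+# round 5: x * y[5] (last word of the multiplier)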
+ movl 160(%esp), %eax
+ movl 20(%eax), %edi
+ movl %edi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ addl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %esi, %eax
+ adcl $0, %eax
+ movl 100(%esp), %esi # 4-byte Reload
+ addl %ebx, %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl %ebp, 92(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 80(%esp) # 4-byte Folded Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 88(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl 112(%esp), %ecx # 4-byte Reload
+ imull %esi, %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ andl $1, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ mull 104(%esp) # 4-byte Folded Reload
+ addl 56(%esp), %eax # 4-byte Folded Reload
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 100(%esp), %esi # 4-byte Folded Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ adcl 96(%esp), %ecx # 4-byte Folded Reload
+ adcl 88(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 112(%esp) # 4-byte Spill
+ adcl 84(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
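+# final correction: compute t - p limb by limb; the branches below keep t - p
+# when the subtraction (including the pending carry) does not borrow, and the
+# unreduced t otherwise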
+ movl %eax, %esi
+ subl 108(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sbbl 104(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ sbbl 116(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl %ebx, %esi
+ movl %edi, %ebx
+ sbbl 120(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ movl %ebp, %edi
+ sbbl 124(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ movl %ebp, %esi
+ sbbl 128(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 128(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB83_2
+# BB#1:
+ movl 104(%esp), %edx # 4-byte Reload
+.LBB83_2:
+ testb %bl, %bl
+ jne .LBB83_4
+# BB#3:
+ movl 108(%esp), %eax # 4-byte Reload
+.LBB83_4:
+ movl 152(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %edx, 4(%ebx)
+ jne .LBB83_6
+# BB#5:
+ movl 116(%esp), %ecx # 4-byte Reload
+.LBB83_6:
+ movl %ecx, 8(%ebx)
+ movl 112(%esp), %eax # 4-byte Reload
+ jne .LBB83_8
+# BB#7:
+ movl 120(%esp), %eax # 4-byte Reload
+.LBB83_8:
+ movl %eax, 12(%ebx)
+ jne .LBB83_10
+# BB#9:
+ movl 124(%esp), %edi # 4-byte Reload
+.LBB83_10:
+ movl %edi, 16(%ebx)
+ jne .LBB83_12
+# BB#11:
+ movl 128(%esp), %ebp # 4-byte Reload
+.LBB83_12:
+ movl %ebp, 20(%ebx)
+ addl $132, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end83:
+ .size mcl_fp_mont6L, .Lfunc_end83-mcl_fp_mont6L
+
+ .globl mcl_fp_montNF6L
+ .align 16, 0x90
+ .type mcl_fp_montNF6L,@function
+mcl_fp_montNF6L: # @mcl_fp_montNF6L
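+# Montgomery multiplication, mcl's "NF" variant of mcl_fp_mont6L: the same six
+# interleaved multiply/reduce rounds, but the final correction is a signed
+# select between t and t - p (sarl $31 / js) instead of an explicit carry bit,
+# which appears to rely on the modulus leaving a spare bit in its top limb.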
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $132, %esp
+ movl 156(%esp), %ebx
+ movl (%ebx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 160(%esp), %ecx
+ movl (%ecx), %edi
+ mull %edi
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 164(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ imull %edx, %ecx
+ movl (%esi), %edx
+ movl %edx, 128(%esp) # 4-byte Spill
+ movl 20(%esi), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 4(%esi), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 4(%ebx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 16(%ebx), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 12(%ebx), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 8(%ebx), %ebx
+ movl %ebx, 88(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %edi
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %edi
+ movl %edx, %ecx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edi
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, %ebp
+ movl %eax, %esi
+ addl 64(%esp), %esi # 4-byte Folded Reload
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 16(%esp), %edi # 4-byte Reload
+ addl 72(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl 4(%eax), %edi
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ addl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %esi, 40(%esp) # 4-byte Folded Spill
+ adcl %ebp, 44(%esp) # 4-byte Folded Spill
+ adcl %ebx, 48(%esp) # 4-byte Folded Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, %ebp
+ imull 96(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebp, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ addl %ecx, %eax
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %esi, %ebp
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edx, %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ addl %esi, %ebp
+ adcl %edi, %ebx
+ movl %ebx, %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 24(%esp), %ebx # 4-byte Reload
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ movl %ebx, %ecx
+ imull 96(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %edi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %edi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ adcl %ebp, %esi
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl 12(%eax), %ecx
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ addl %ebx, %ebp
+ adcl %esi, %edi
+ movl %edi, %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 24(%esp), %edi # 4-byte Reload
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edi, %esi
+ movl %edi, %ecx
+ imull 96(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %esi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %esi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ adcl %ebp, %ebx
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 64(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl 16(%eax), %ecx
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ addl %ebp, %ebx
+ adcl %edi, %esi
+ movl %esi, %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 24(%esp), %esi # 4-byte Reload
+ addl 40(%esp), %esi # 4-byte Folded Reload
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %esi, %ebx
+ movl %esi, %ecx
+ imull 96(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ebx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ addl %ecx, %eax
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl %edi, %ebx
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edx, %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl 20(%eax), %ecx
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ movl %edx, %ebx
+ addl %edi, %ebx
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %edi
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 80(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ adcl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %ebx # 4-byte Reload
+ imull %ebp, %ebx
+ movl %ebx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ebx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ addl 52(%esp), %edi # 4-byte Folded Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ adcl 88(%esp), %esi # 4-byte Folded Reload
+ adcl 84(%esp), %ebp # 4-byte Folded Reload
+ movl 104(%esp), %ebx # 4-byte Reload
+ adcl 100(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 72(%esp), %eax # 4-byte Folded Reload
+ adcl %edx, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ adcl 92(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 104(%esp) # 4-byte Spill
+ adcl 96(%esp), %edi # 4-byte Folded Reload
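+# final correction: compute t - p; the sign of the last borrow word (sarl $31)
+# decides below whether t or t - p is stored to the result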
+ movl %eax, %edx
+ subl 128(%esp), %edx # 4-byte Folded Reload
+ sbbl 112(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebx
+ sbbl 116(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl %ebp, %ecx
+ sbbl 120(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ sbbl 124(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl %edi, %ecx
+ movl %edi, %esi
+ movl %ecx, %edi
+ sbbl 108(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %ecx
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ js .LBB84_2
+# BB#1:
+ movl %edx, %eax
+.LBB84_2:
+ movl 152(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB84_4
+# BB#3:
+ movl %ebx, %eax
+.LBB84_4:
+ movl %eax, 4(%ecx)
+ movl %ecx, %ebx
+ movl %esi, %eax
+ movl 104(%esp), %ecx # 4-byte Reload
+ movl 100(%esp), %edx # 4-byte Reload
+ js .LBB84_6
+# BB#5:
+ movl 116(%esp), %edx # 4-byte Reload
+.LBB84_6:
+ movl %edx, 8(%ebx)
+ movl %ebx, %edx
+ js .LBB84_8
+# BB#7:
+ movl 120(%esp), %ebp # 4-byte Reload
+.LBB84_8:
+ movl %ebp, 12(%edx)
+ js .LBB84_10
+# BB#9:
+ movl 128(%esp), %ecx # 4-byte Reload
+.LBB84_10:
+ movl %ecx, 16(%edx)
+ js .LBB84_12
+# BB#11:
+ movl %edi, %eax
+.LBB84_12:
+ movl %eax, 20(%edx)
+ addl $132, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end84:
+ .size mcl_fp_montNF6L, .Lfunc_end84-mcl_fp_montNF6L
+
+ .globl mcl_fp_montRed6L
+ .align 16, 0x90
+ .type mcl_fp_montRed6L,@function
+mcl_fp_montRed6L: # @mcl_fp_montRed6L
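+# Montgomery reduction: folds the 12-limb double-width input at 128(%esp) down
+# to 6 limbs modulo the table at 132(%esp), i.e. z = x * 2^-192 mod p with z at
+# 124(%esp). Each of the six rounds uses m = t[0] * (-p^-1 mod 2^32) (the word
+# at -4 of the modulus table) to clear one low word, and the result gets one
+# conditional subtraction of p at the end.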
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $104, %esp
+ movl 132(%esp), %eax
+ movl -4(%eax), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl (%eax), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 128(%esp), %ebp
+ movl (%ebp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ imull %edx, %ecx
+ movl 20(%eax), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 16(%eax), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 12(%eax), %ebx
+ movl %ebx, 88(%esp) # 4-byte Spill
+ movl 8(%eax), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 4(%eax), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, %esi
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ addl %edi, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 72(%esp), %ebx # 4-byte Folded Reload
+ movl 36(%esp), %ebx # 4-byte Reload
+ adcl 4(%ebp), %ebx
+ adcl 8(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 12(%ebp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 16(%ebp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 20(%ebp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 24(%ebp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 44(%ebp), %eax
+ movl 40(%ebp), %edx
+ movl 36(%ebp), %esi
+ movl 32(%ebp), %edi
+ movl 28(%ebp), %ecx
+ adcl $0, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %esi
+ imull 96(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %esi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %esi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, %esi
+ addl %edi, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl %ebp, %ecx
+ movl %ecx, %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl %ebx, %esi
+ movl 24(%esp), %esi # 4-byte Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ adcl $0, 60(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ adcl $0, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %esi, %ebx
+ imull 96(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl %ecx, %ebp
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl 48(%esp), %ebx # 4-byte Reload
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 24(%esp), %eax # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl $0, 60(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ adcl $0, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ebp, %ecx
+ imull 96(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, %edi
+ addl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl %ebx, %esi
+ movl %esi, %ebx
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ebp, %edi
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ adcl $0, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %edi, %esi
+ imull 96(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %esi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ addl %ecx, %edx
+ movl %edx, %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl 96(%esp), %ebx # 4-byte Reload
+ imull %ebp, %ebx
+ movl %ebx, 96(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ movl %ebx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ mull 80(%esp) # 4-byte Folded Reload
+ addl %ebx, %eax
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ addl %esi, 28(%esp) # 4-byte Folded Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 96(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ movl %eax, %esi
+ subl 92(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sbbl 80(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 80(%esp) # 4-byte Spill
+ sbbl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+ sbbl 88(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 88(%esp) # 4-byte Spill
+ sbbl 100(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ movl %edi, %esi
+ sbbl 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB85_2
+# BB#1:
+ movl 80(%esp), %edx # 4-byte Reload
+.LBB85_2:
+ testb %bl, %bl
+ jne .LBB85_4
+# BB#3:
+ movl 72(%esp), %eax # 4-byte Reload
+.LBB85_4:
+ movl 124(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %edx, 4(%ebx)
+ movl 52(%esp), %ecx # 4-byte Reload
+ jne .LBB85_6
+# BB#5:
+ movl 84(%esp), %ecx # 4-byte Reload
+.LBB85_6:
+ movl %ecx, 8(%ebx)
+ movl %edi, %ecx
+ movl 60(%esp), %edi # 4-byte Reload
+ movl 96(%esp), %esi # 4-byte Reload
+ jne .LBB85_8
+# BB#7:
+ movl 88(%esp), %esi # 4-byte Reload
+.LBB85_8:
+ movl %esi, 12(%ebx)
+ jne .LBB85_10
+# BB#9:
+ movl 92(%esp), %edi # 4-byte Reload
+.LBB85_10:
+ movl %edi, 16(%ebx)
+ jne .LBB85_12
+# BB#11:
+ movl 100(%esp), %ecx # 4-byte Reload
+.LBB85_12:
+ movl %ecx, 20(%ebx)
+ addl $104, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end85:
+ .size mcl_fp_montRed6L, .Lfunc_end85-mcl_fp_montRed6L
+
+ .globl mcl_fp_addPre6L
+ .align 16, 0x90
+ .type mcl_fp_addPre6L,@function
+mcl_fp_addPre6L: # @mcl_fp_addPre6L
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 12(%esp), %edx
+ addl (%edx), %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 4(%eax), %ecx
+ adcl 4(%edx), %ecx
+ movl %ecx, 4(%esi)
+ movl 8(%eax), %ecx
+ adcl 8(%edx), %ecx
+ movl %ecx, 8(%esi)
+ movl 12(%edx), %ecx
+ adcl 12(%eax), %ecx
+ movl %ecx, 12(%esi)
+ movl 16(%edx), %ecx
+ adcl 16(%eax), %ecx
+ movl %ecx, 16(%esi)
+ movl 20(%eax), %eax
+ movl 20(%edx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 20(%esi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end86:
+ .size mcl_fp_addPre6L, .Lfunc_end86-mcl_fp_addPre6L
+
+ .globl mcl_fp_subPre6L
+ .align 16, 0x90
+ .type mcl_fp_subPre6L,@function
+mcl_fp_subPre6L: # @mcl_fp_subPre6L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %edx
+ xorl %eax, %eax
+ movl 20(%esp), %esi
+ subl (%esi), %edx
+ movl 12(%esp), %edi
+ movl %edx, (%edi)
+ movl 4(%ecx), %edx
+ sbbl 4(%esi), %edx
+ movl %edx, 4(%edi)
+ movl 8(%ecx), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, 8(%edi)
+ movl 12(%ecx), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%edi)
+ movl 16(%ecx), %edx
+ sbbl 16(%esi), %edx
+ movl %edx, 16(%edi)
+ movl 20(%esi), %edx
+ movl 20(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 20(%edi)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end87:
+ .size mcl_fp_subPre6L, .Lfunc_end87-mcl_fp_subPre6L
+
+ .globl mcl_fp_shr1_6L
+ .align 16, 0x90
+ .type mcl_fp_shr1_6L,@function
+mcl_fp_shr1_6L: # @mcl_fp_shr1_6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl 20(%eax), %ecx
+ movl 16(%eax), %edx
+ movl 12(%eax), %esi
+ movl 8(%eax), %edi
+ movl (%eax), %ebx
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %ebx
+ movl 20(%esp), %ebp
+ movl %ebx, (%ebp)
+ shrdl $1, %edi, %eax
+ movl %eax, 4(%ebp)
+ shrdl $1, %esi, %edi
+ movl %edi, 8(%ebp)
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ebp)
+ shrdl $1, %ecx, %edx
+ movl %edx, 16(%ebp)
+ shrl %ecx
+ movl %ecx, 20(%ebp)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end88:
+ .size mcl_fp_shr1_6L, .Lfunc_end88-mcl_fp_shr1_6L
+
+ .globl mcl_fp_add6L
+ .align 16, 0x90
+ .type mcl_fp_add6L,@function
+mcl_fp_add6L: # @mcl_fp_add6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $12, %esp
+ movl 40(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ebp
+ movl 36(%esp), %ebx
+ addl (%ebx), %edx
+ adcl 4(%ebx), %ebp
+ movl 8(%eax), %ecx
+ adcl 8(%ebx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %edi
+ adcl 12(%eax), %ecx
+ adcl 16(%eax), %edi
+ movl 20(%ebx), %ebx
+ adcl 20(%eax), %ebx
+ movl 32(%esp), %eax
+ movl %edx, (%eax)
+ movl %ebp, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %ecx, 12(%eax)
+ movl %edi, 16(%eax)
+ movl %ebx, 20(%eax)
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 44(%esp), %esi
+ subl (%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 8(%esp), %edx # 4-byte Reload
+ movl 44(%esp), %esi
+ sbbl 4(%esi), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ sbbl 8(%esi), %edx
+ sbbl 12(%esi), %ebp
+ sbbl 16(%esi), %edi
+ sbbl 20(%esi), %ebx
+ sbbl $0, %eax
+ testb $1, %al
+ jne .LBB89_2
+# BB#1: # %nocarry
+ movl (%esp), %eax # 4-byte Reload
+ movl 32(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ecx)
+ movl %edx, 8(%ecx)
+ movl %ebp, 12(%ecx)
+ movl %edi, 16(%ecx)
+ movl %ebx, 20(%ecx)
+.LBB89_2: # %carry
+ addl $12, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end89:
+ .size mcl_fp_add6L, .Lfunc_end89-mcl_fp_add6L
+
+ .globl mcl_fp_addNF6L
+ .align 16, 0x90
+ .type mcl_fp_addNF6L,@function
+mcl_fp_addNF6L: # @mcl_fp_addNF6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 68(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 64(%esp), %ebp
+ addl (%ebp), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ adcl 4(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 20(%eax), %edx
+ movl 16(%eax), %esi
+ movl 12(%eax), %edi
+ movl 8(%eax), %eax
+ adcl 8(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 12(%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 16(%ebp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%ebp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ movl 72(%esp), %ebx
+ subl (%ebx), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ movl 72(%esp), %ecx
+ sbbl 4(%ecx), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ sbbl 8(%ecx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 12(%ecx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl %esi, %edi
+ sbbl 16(%ecx), %edi
+ movl %edx, %esi
+ sbbl 20(%ecx), %esi
+ movl %esi, %ebx
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ js .LBB90_2
+# BB#1:
+ movl (%esp), %eax # 4-byte Reload
+.LBB90_2:
+ movl 60(%esp), %ebx
+ movl %eax, (%ebx)
+ movl 20(%esp), %ecx # 4-byte Reload
+ js .LBB90_4
+# BB#3:
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB90_4:
+ movl %ecx, 4(%ebx)
+ movl 36(%esp), %eax # 4-byte Reload
+ movl 28(%esp), %edx # 4-byte Reload
+ movl 24(%esp), %ecx # 4-byte Reload
+ js .LBB90_6
+# BB#5:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB90_6:
+ movl %ecx, 8(%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ js .LBB90_8
+# BB#7:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB90_8:
+ movl %edx, 12(%ebx)
+ js .LBB90_10
+# BB#9:
+ movl %edi, %ecx
+.LBB90_10:
+ movl %ecx, 16(%ebx)
+ js .LBB90_12
+# BB#11:
+ movl %esi, %eax
+.LBB90_12:
+ movl %eax, 20(%ebx)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end90:
+ .size mcl_fp_addNF6L, .Lfunc_end90-mcl_fp_addNF6L
+
+ .globl mcl_fp_sub6L
+ .align 16, 0x90
+ .type mcl_fp_sub6L,@function
+mcl_fp_sub6L: # @mcl_fp_sub6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 40(%esp), %ebx
+ movl (%ebx), %esi
+ movl 4(%ebx), %edi
+ movl 44(%esp), %ecx
+ subl (%ecx), %esi
+ sbbl 4(%ecx), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl 8(%ebx), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%ebx), %eax
+ sbbl 12(%ecx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 16(%ebx), %ebp
+ sbbl 16(%ecx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 20(%ebx), %edx
+ sbbl 20(%ecx), %edx
+ movl $0, %ecx
+ sbbl $0, %ecx
+ testb $1, %cl
+ movl 36(%esp), %ebx
+ movl %esi, (%ebx)
+ movl %edi, 4(%ebx)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl %eax, 12(%ebx)
+ movl %ebp, 16(%ebx)
+ movl %edx, 20(%ebx)
+ je .LBB91_2
+# BB#1: # %carry
+ movl 48(%esp), %ecx
+ addl (%ecx), %esi
+ movl %esi, (%ebx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 4(%ecx), %eax
+ adcl 8(%ecx), %edi
+ movl %eax, 4(%ebx)
+ movl 12(%ecx), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl %eax, 12(%ebx)
+ movl 16(%ecx), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ebx)
+ movl 20(%ecx), %eax
+ adcl %edx, %eax
+ movl %eax, 20(%ebx)
+.LBB91_2: # %nocarry
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end91:
+ .size mcl_fp_sub6L, .Lfunc_end91-mcl_fp_sub6L
+
+ .globl mcl_fp_subNF6L
+ .align 16, 0x90
+ .type mcl_fp_subNF6L,@function
+mcl_fp_subNF6L: # @mcl_fp_subNF6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 48(%esp), %ebx
+ movl 20(%ebx), %esi
+ movl (%ebx), %ecx
+ movl 4(%ebx), %eax
+ movl 52(%esp), %ebp
+ subl (%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 4(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 16(%ebx), %eax
+ movl 12(%ebx), %ecx
+ movl 8(%ebx), %edx
+ sbbl 8(%ebp), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ sbbl 16(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %edx
+ sbbl 20(%ebp), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl %edx, %ebp
+ sarl $31, %ebp
+ movl %ebp, %ecx
+ addl %ecx, %ecx
+ movl %ebp, %eax
+ adcl %eax, %eax
+ shrl $31, %edx
+ orl %ecx, %edx
+ movl 56(%esp), %ebx
+ andl 4(%ebx), %eax
+ andl (%ebx), %edx
+ movl 20(%ebx), %edi
+ andl %ebp, %edi
+ movl 16(%ebx), %esi
+ andl %ebp, %esi
+ movl 12(%ebx), %ecx
+ andl %ebp, %ecx
+ andl 8(%ebx), %ebp
+ addl 8(%esp), %edx # 4-byte Folded Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl 44(%esp), %ebx
+ movl %edx, (%ebx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 4(%ebx)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 8(%ebx)
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, 12(%ebx)
+ movl %esi, 16(%ebx)
+ adcl (%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%ebx)
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end92:
+ .size mcl_fp_subNF6L, .Lfunc_end92-mcl_fp_subNF6L
+
+ .globl mcl_fpDbl_add6L
+ .align 16, 0x90
+ .type mcl_fpDbl_add6L,@function
+mcl_fpDbl_add6L: # @mcl_fpDbl_add6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 64(%esp), %edx
+ movl 60(%esp), %ecx
+ movl 12(%ecx), %esi
+ movl 16(%ecx), %eax
+ movl 8(%edx), %edi
+ movl (%edx), %ebx
+ addl (%ecx), %ebx
+ movl 56(%esp), %ebp
+ movl %ebx, (%ebp)
+ movl 4(%edx), %ebx
+ adcl 4(%ecx), %ebx
+ adcl 8(%ecx), %edi
+ adcl 12(%edx), %esi
+ adcl 16(%edx), %eax
+ movl %ebx, 4(%ebp)
+ movl %edx, %ebx
+ movl 32(%ebx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %edi, 8(%ebp)
+ movl 20(%ebx), %edi
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ adcl %edi, %esi
+ movl 24(%ebx), %edi
+ movl %eax, 16(%ebp)
+ movl 24(%ecx), %edx
+ adcl %edi, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 28(%ebx), %edi
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %eax
+ adcl %edi, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 32(%ecx), %ebp
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 36(%ebx), %esi
+ movl %ebx, %edi
+ movl 36(%ecx), %ebx
+ adcl %esi, %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 40(%edi), %esi
+ movl 40(%ecx), %edi
+ adcl %esi, %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 64(%esp), %esi
+ movl 44(%esi), %esi
+ movl 44(%ecx), %ecx
+ adcl %esi, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 68(%esp), %esi
+ subl (%esi), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 68(%esp), %edx
+ sbbl 4(%edx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 8(%edx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ sbbl 12(%edx), %ebp
+ movl %edi, %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ sbbl 16(%edx), %ebx
+ movl %edi, %eax
+ sbbl 20(%edx), %eax
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB93_2
+# BB#1:
+ movl %eax, %edi
+.LBB93_2:
+ testb %cl, %cl
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl 16(%esp), %edx # 4-byte Reload
+ jne .LBB93_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB93_4:
+ movl 56(%esp), %eax
+ movl %ecx, 24(%eax)
+ movl %edx, 28(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl 24(%esp), %edx # 4-byte Reload
+ jne .LBB93_6
+# BB#5:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB93_6:
+ movl %edx, 32(%eax)
+ movl 28(%esp), %edx # 4-byte Reload
+ jne .LBB93_8
+# BB#7:
+ movl %ebp, %edx
+.LBB93_8:
+ movl %edx, 36(%eax)
+ jne .LBB93_10
+# BB#9:
+ movl %ebx, %ecx
+.LBB93_10:
+ movl %ecx, 40(%eax)
+ movl %edi, 44(%eax)
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end93:
+ .size mcl_fpDbl_add6L, .Lfunc_end93-mcl_fpDbl_add6L
+
+ .globl mcl_fpDbl_sub6L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub6L,@function
+mcl_fpDbl_sub6L: # @mcl_fpDbl_sub6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 48(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %edi
+ movl 52(%esp), %esi
+ subl (%esi), %eax
+ sbbl 4(%esi), %edi
+ movl 8(%edx), %ebx
+ sbbl 8(%esi), %ebx
+ movl 44(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edx), %eax
+ sbbl 12(%esi), %eax
+ movl %edi, 4(%ecx)
+ movl 16(%edx), %edi
+ sbbl 16(%esi), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%esi), %ebx
+ movl %eax, 12(%ecx)
+ movl 20(%edx), %eax
+ sbbl %ebx, %eax
+ movl 24(%esi), %ebx
+ movl %edi, 16(%ecx)
+ movl 24(%edx), %edi
+ sbbl %ebx, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 28(%esi), %edi
+ movl %eax, 20(%ecx)
+ movl 28(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 32(%esi), %edi
+ movl 32(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 36(%esi), %edi
+ movl 36(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 40(%esi), %edi
+ movl 40(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 44(%esi), %esi
+ movl 44(%edx), %eax
+ sbbl %esi, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl $0, %ebx
+ sbbl $0, %ebx
+ andl $1, %ebx
+ movl 56(%esp), %eax
+ jne .LBB94_1
+# BB#2:
+ xorl %edx, %edx
+ jmp .LBB94_3
+.LBB94_1:
+ movl 20(%eax), %edx
+.LBB94_3:
+ testb %bl, %bl
+ jne .LBB94_4
+# BB#5:
+ movl $0, %esi
+ movl $0, %edi
+ jmp .LBB94_6
+.LBB94_4:
+ movl (%eax), %edi
+ movl 4(%eax), %esi
+.LBB94_6:
+ jne .LBB94_7
+# BB#8:
+ movl $0, %ebx
+ jmp .LBB94_9
+.LBB94_7:
+ movl 16(%eax), %ebx
+.LBB94_9:
+ jne .LBB94_10
+# BB#11:
+ movl $0, %ebp
+ jmp .LBB94_12
+.LBB94_10:
+ movl 12(%eax), %ebp
+.LBB94_12:
+ jne .LBB94_13
+# BB#14:
+ xorl %eax, %eax
+ jmp .LBB94_15
+.LBB94_13:
+ movl 8(%eax), %eax
+.LBB94_15:
+ addl 8(%esp), %edi # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ movl %edi, 24(%ecx)
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 28(%ecx)
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 36(%ecx)
+ movl %ebx, 40(%ecx)
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%ecx)
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end94:
+ .size mcl_fpDbl_sub6L, .Lfunc_end94-mcl_fpDbl_sub6L
+
+ .globl mcl_fp_mulUnitPre7L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre7L,@function
+mcl_fp_mulUnitPre7L: # @mcl_fp_mulUnitPre7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 64(%esp), %esi
+ movl 60(%esp), %ebx
+ movl %esi, %eax
+ mull 24(%ebx)
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 20(%ebx)
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 16(%ebx)
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 12(%ebx)
+ movl %edx, %ebp
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 8(%ebx)
+ movl %edx, %ecx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 4(%ebx)
+ movl %edx, %edi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull (%ebx)
+ movl 56(%esp), %esi
+ movl %eax, (%esi)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%esi)
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esi)
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esi)
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esi)
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 28(%esi)
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end95:
+ .size mcl_fp_mulUnitPre7L, .Lfunc_end95-mcl_fp_mulUnitPre7L
+
+ .globl mcl_fpDbl_mulPre7L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre7L,@function
+mcl_fpDbl_mulPre7L: # @mcl_fpDbl_mulPre7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 124(%esp), %ebx
+ movl (%ebx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx
+ movl (%ecx), %edi
+ movl %ecx, %ebp
+ mull %edi
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 4(%ebx), %ecx
+ movl 8(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 16(%ebx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 20(%ebx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 24(%ebx), %ebx
+ movl %ebx, 80(%esp) # 4-byte Spill
+ movl 4(%ebp), %ebp
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebp
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebp
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, %eax
+ mull %ebp
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 8(%esp), %esi # 4-byte Reload
+ movl %esi, %eax
+ mull %ebp
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edi
+ movl %edx, %ebx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %edi
+ movl %edx, %ebp
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, %ecx
+ movl 24(%esp), %esi # 4-byte Reload
+ addl 96(%esp), %esi # 4-byte Folded Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebx, %edi
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl 80(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ addl 52(%esp), %esi # 4-byte Folded Reload
+ movl 120(%esp), %eax
+ movl %esi, 4(%eax)
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, %ecx
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %esi
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 96(%esp), %ebp # 4-byte Reload
+ addl 84(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 80(%esp) # 4-byte Spill
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 124(%esp), %esi
+ movl 24(%esi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl 8(%eax), %edi
+ movl %ecx, %eax
+ mull %edi
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ mull %edi
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ mull %edi
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ mull %edi
+ movl %eax, %ebp
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ mull %edi
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl (%esi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 4(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ mull %edi
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 28(%esp) # 4-byte Spill
+ addl 96(%esp), %eax # 4-byte Folded Reload
+ movl 120(%esp), %edx
+ movl %eax, 8(%edx)
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 88(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 92(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 84(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+ movl 128(%esp), %eax
+ movl 12(%eax), %ecx
+ movl 16(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl (%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 4(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edx, 36(%esp) # 4-byte Spill
+ andl $1, %edi
+ addl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ addl 4(%esp), %ebx # 4-byte Folded Reload
+ movl 120(%esp), %ebp
+ movl %ebx, 12(%ebp)
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ movl %esi, %ebx
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %esi
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edx
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 124(%esp), %ebx
+ movl 24(%ebx), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl 16(%eax), %ecx
+ movl %edx, %eax
+ mull %ecx
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 16(%ebx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 12(%ebx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 8(%ebx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl (%ebx), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 4(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ mull %ecx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %edi, %eax
+ mull %ecx
+ movl %edx, (%esp) # 4-byte Spill
+ addl %ebp, %eax
+ movl 120(%esp), %ecx
+ movl %eax, 16(%ecx)
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %edi
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 88(%esp), %esi # 4-byte Folded Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ addl (%esp), %edi # 4-byte Folded Reload
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 64(%esp) # 4-byte Spill
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 92(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl 20(%eax), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 40(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 40(%esp) # 4-byte Spill
+ addl %edi, %eax
+ movl 120(%esp), %edx
+ movl %eax, 20(%edx)
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 84(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 88(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 96(%esp) # 4-byte Folded Spill
+ adcl 92(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl 24(%eax), %ecx
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl 124(%esp), %edi
+ mull 24(%edi)
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 20(%edi)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 16(%edi)
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 12(%edi)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 8(%edi)
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 4(%edi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull (%edi)
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edx, 12(%esp) # 4-byte Spill
+ andl $1, %esi
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ addl (%esp), %ebx # 4-byte Folded Reload
+ movl 120(%esp), %ecx
+ movl %ebx, 24(%ecx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %ebx
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edx
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 12(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 28(%ecx)
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 32(%ecx)
+ movl 96(%esp), %ebx # 4-byte Reload
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, 36(%ecx)
+ movl %edi, %edx
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 40(%ecx)
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 44(%ecx)
+ movl %esi, 48(%ecx)
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end96:
+ .size mcl_fpDbl_mulPre7L, .Lfunc_end96-mcl_fpDbl_mulPre7L
+
+ .globl mcl_fpDbl_sqrPre7L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre7L,@function
+mcl_fpDbl_sqrPre7L: # @mcl_fpDbl_sqrPre7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 124(%esp), %esi
+ movl 24(%esi), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl (%esi), %ebx
+ movl 4(%esi), %edi
+ mull %edi
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ mull %edi
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 16(%esi), %ecx
+ movl %ecx, %eax
+ mull %edi
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 12(%esi), %esi
+ movl %esi, %eax
+ mull %edi
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl 8(%eax), %ebp
+ movl %ebp, %eax
+ mull %edi
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ebx
+ movl %edx, %ebp
+ movl %eax, %ecx
+ movl %edi, %eax
+ mull %edi
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ebx
+ movl %edx, %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull %ebx
+ movl 120(%esp), %ebx
+ movl %eax, (%ebx)
+ addl %edi, %edx
+ adcl %esi, %ecx
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %esi
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl %eax, 92(%esp) # 4-byte Folded Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 96(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edi, %edx
+ movl %edx, 4(%ebx)
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebx
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %edi
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %esi
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 72(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebp # 4-byte Folded Reload
+ movl 124(%esp), %edi
+ movl 24(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 8(%edi), %esi
+ mull %esi
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 20(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ mull %esi
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 16(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ mull %esi
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 12(%edi), %ebx
+ movl %ebx, %eax
+ mull %esi
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl (%edi), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 4(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ mull %esi
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %esi, %eax
+ mull %esi
+ movl %eax, %ecx
+ movl %edx, 4(%esp) # 4-byte Spill
+ addl 28(%esp), %edi # 4-byte Folded Reload
+ movl 120(%esp), %eax
+ movl %edi, 8(%eax)
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ adcl 92(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 84(%esp), %esi # 4-byte Folded Reload
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 88(%esp), %edi # 4-byte Folded Reload
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %ebp, %eax
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl %edx, 56(%esp) # 4-byte Folded Spill
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl 20(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull %ebx
+ movl %eax, %ebx
+ movl %edx, 16(%esp) # 4-byte Spill
+ addl 68(%esp), %edi # 4-byte Folded Reload
+ movl 120(%esp), %eax
+ movl %edi, 12(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 96(%esp) # 4-byte Spill
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 88(%esp) # 4-byte Folded Spill
+ adcl %ebp, 92(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edi
+ movl 20(%eax), %ebx
+ movl %edi, %eax
+ mull %ebx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl 16(%eax), %ebp
+ movl %edi, %eax
+ mull %ebp
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ addl %eax, 56(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 60(%esp) # 4-byte Folded Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl %eax, 88(%esp) # 4-byte Folded Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 92(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 124(%esp), %esi
+ movl 24(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ mull %ebp
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebp
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ mull %ebp
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ mull %ebp
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull %ebp
+ movl %eax, %esi
+ movl %edx, (%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ movl 120(%esp), %eax
+ movl %ebp, 16(%eax)
+ movl %ecx, %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %edi, %ebp
+ adcl 96(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ adcl 88(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl 32(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull %ebx
+ movl %eax, %esi
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ movl 120(%esp), %edx
+ movl %eax, 20(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, %edx
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl %ebp, %ebx
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl %edi, %ebp
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 64(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 124(%esp), %esi
+ movl 24(%esi), %ecx
+ movl %ecx, %eax
+ mull 20(%esi)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 16(%esi)
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 12(%esi)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 8(%esi)
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 4(%esi)
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull (%esi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull %ecx
+ movl %edx, 52(%esp) # 4-byte Spill
+ addl 80(%esp), %esi # 4-byte Folded Reload
+ movl 120(%esp), %edx
+ movl %esi, 24(%edx)
+ movl %edx, %esi
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ adcl 84(%esp), %ebp # 4-byte Folded Reload
+ adcl 92(%esp), %ebx # 4-byte Folded Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 96(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl 36(%esp), %edi # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 28(%esi)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 32(%esi)
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 36(%esi)
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 40(%esi)
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 44(%esi)
+ movl %eax, 48(%esi)
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end97:
+ .size mcl_fpDbl_sqrPre7L, .Lfunc_end97-mcl_fpDbl_sqrPre7L
+
+ .globl mcl_fp_mont7L
+ .align 16, 0x90
+ .type mcl_fp_mont7L,@function
+mcl_fp_mont7L: # @mcl_fp_mont7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $152, %esp
+ movl 176(%esp), %esi
+ movl (%esi), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 180(%esp), %edx
+ movl (%edx), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 184(%esp), %ecx
+ movl -4(%ecx), %edx
+ movl %edx, 132(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ imull %edx, %ebx
+ movl (%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 24(%ecx), %edx
+ movl %edx, 120(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 12(%ecx), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 8(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 4(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 4(%esi), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl %esi, %eax
+ movl 24(%eax), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 20(%eax), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 16(%eax), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 12(%eax), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 8(%eax), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl 72(%esp), %ecx # 4-byte Reload
+ mull %ecx
+ movl %edx, %esi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ecx
+ movl %edx, %edi
+ movl %eax, (%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, %ebx
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ mull %ecx
+ addl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, (%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ addl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl 28(%esp), %ebp # 4-byte Reload
+ addl 80(%esp), %ebp # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 88(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl (%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 72(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 64(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 4(%eax), %ecx
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ addl %edi, %ebx
+ adcl 84(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 48(%esp) # 4-byte Spill
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 68(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl 88(%esp), %ecx # 4-byte Reload
+ imull 132(%esp), %ecx # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ addl %ebx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 88(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %ebp # 4-byte Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 84(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 180(%esp), %eax
+ movl 8(%eax), %ebx
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, %edi
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 72(%esp), %esi # 4-byte Reload
+ addl %ebp, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl %edi, 44(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl %edi, 48(%esp) # 4-byte Folded Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %esi, %ecx
+ imull 132(%esp), %ecx # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ addl %ebx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 72(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 88(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 180(%esp), %eax
+ movl 12(%eax), %ebx
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, %edi
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 72(%esp), %esi # 4-byte Reload
+ addl %ebp, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl %edi, 48(%esp) # 4-byte Folded Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl %edi, 52(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %esi, %ecx
+ imull 132(%esp), %ecx # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ addl %ebx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 72(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 88(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 180(%esp), %eax
+ movl 16(%eax), %ebx
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, %edi
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 72(%esp), %esi # 4-byte Reload
+ addl %ebp, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl %edi, 48(%esp) # 4-byte Folded Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl %edi, 52(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %esi, %ecx
+ imull 132(%esp), %ecx # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ addl %ebx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 72(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %ebp # 4-byte Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 88(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 180(%esp), %eax
+ movl 20(%eax), %ebx
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, %edi
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 72(%esp), %esi # 4-byte Reload
+ addl %ebp, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl %edi, 48(%esp) # 4-byte Folded Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl %edi, 52(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %esi, %ecx
+ imull 132(%esp), %ecx # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ addl %ebp, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 72(%esp), %ebx # 4-byte Folded Reload
+ movl 20(%esp), %ebx # 4-byte Reload
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 88(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 180(%esp), %eax
+ movl 24(%eax), %ebp
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, %edi
+ adcl 112(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl 92(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx # 4-byte Folded Reload
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 116(%esp), %esi # 4-byte Reload
+ addl %ebx, %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl %edi, 112(%esp) # 4-byte Folded Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl %edi, 104(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 100(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl 132(%esp), %ecx # 4-byte Reload
+ imull %esi, %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ andl $1, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl 132(%esp), %eax # 4-byte Reload
+ mull 124(%esp) # 4-byte Folded Reload
+ addl 56(%esp), %eax # 4-byte Folded Reload
+ adcl %edi, %edx
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ adcl 72(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 132(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl 64(%esp), %esi # 4-byte Reload
+ addl 116(%esp), %esi # 4-byte Folded Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ adcl 112(%esp), %edx # 4-byte Folded Reload
+ adcl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 116(%esp) # 4-byte Spill
+ adcl 100(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %esi # 4-byte Reload
+ adcl 88(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 132(%esp) # 4-byte Spill
+ adcl 84(%esp), %edi # 4-byte Folded Reload
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ movl %eax, %esi
+ subl 128(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sbbl 124(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 124(%esp) # 4-byte Spill
+ sbbl 136(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 128(%esp) # 4-byte Spill
+ sbbl 140(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 136(%esp) # 4-byte Spill
+ sbbl 144(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 140(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ movl %ecx, %ebx
+ movl %ecx, %ebp
+ sbbl 148(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 144(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ sbbl 120(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 148(%esp) # 4-byte Spill
+ movl 96(%esp), %ebx # 4-byte Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB98_2
+# BB#1:
+ movl 108(%esp), %eax # 4-byte Reload
+.LBB98_2:
+ movl 172(%esp), %esi
+ movl %eax, (%esi)
+ testb %bl, %bl
+ jne .LBB98_4
+# BB#3:
+ movl 124(%esp), %edx # 4-byte Reload
+.LBB98_4:
+ movl %edx, 4(%esi)
+ movl 104(%esp), %ecx # 4-byte Reload
+ jne .LBB98_6
+# BB#5:
+ movl 128(%esp), %ecx # 4-byte Reload
+.LBB98_6:
+ movl %ecx, 8(%esi)
+ movl 112(%esp), %ecx # 4-byte Reload
+ movl 116(%esp), %eax # 4-byte Reload
+ jne .LBB98_8
+# BB#7:
+ movl 136(%esp), %eax # 4-byte Reload
+.LBB98_8:
+ movl %eax, 12(%esi)
+ jne .LBB98_10
+# BB#9:
+ movl 140(%esp), %ecx # 4-byte Reload
+.LBB98_10:
+ movl %ecx, 16(%esi)
+ jne .LBB98_12
+# BB#11:
+ movl 144(%esp), %ebp # 4-byte Reload
+.LBB98_12:
+ movl %ebp, 20(%esi)
+ jne .LBB98_14
+# BB#13:
+ movl 148(%esp), %edi # 4-byte Reload
+.LBB98_14:
+ movl %edi, 24(%esi)
+ addl $152, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end98:
+ .size mcl_fp_mont7L, .Lfunc_end98-mcl_fp_mont7L
+
+ .globl mcl_fp_montNF7L
+ .align 16, 0x90
+ .type mcl_fp_montNF7L,@function
+mcl_fp_montNF7L: # @mcl_fp_montNF7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $152, %esp
+ movl 176(%esp), %ebp
+ movl (%ebp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 180(%esp), %ecx
+ movl (%ecx), %ecx
+ mull %ecx
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 184(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl %eax, %edi
+ imull %edx, %edi
+ movl (%esi), %edx
+ movl %edx, 148(%esp) # 4-byte Spill
+ movl 24(%esi), %edx
+ movl %edx, 124(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 4(%esi), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 4(%ebp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 24(%ebp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 20(%ebp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 12(%ebp), %ebx
+ movl %ebx, 96(%esp) # 4-byte Spill
+ movl 8(%ebp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %eax, %edi
+ addl 84(%esp), %edi # 4-byte Folded Reload
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ adcl %ebx, %ebp
+ movl %esi, %edx
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 16(%esp), %ebx # 4-byte Reload
+ addl 88(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, %ebx
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %eax
+ addl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 4(%eax), %ecx
+ movl %ecx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ addl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl %ebx, 48(%esp) # 4-byte Folded Spill
+ adcl %edi, 52(%esp) # 4-byte Folded Spill
+ movl %ebp, %edi
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 56(%esp) # 4-byte Folded Spill
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, %ecx
+ imull 108(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ addl %ebp, %eax
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ adcl %edi, %esi
+ movl %esi, %edi
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 8(%eax), %ebp
+ movl %ebp, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ addl %ebx, %ebp
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebx
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl %edi, %esi
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 44(%esp), %edi # 4-byte Reload
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, %ebx
+ imull 108(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ addl %edi, %eax
+ adcl %ebp, %ecx
+ movl %ecx, %esi
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %ebx # 4-byte Reload
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 72(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 12(%eax), %edi
+ movl %edi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %edi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ addl %ebp, %ebx
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %edi
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 44(%esp), %esi # 4-byte Reload
+ addl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 72(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, %ebp
+ imull 108(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebp, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ addl %edi, %eax
+ adcl %ebx, %esi
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %ebx # 4-byte Reload
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 72(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 16(%eax), %ebp
+ movl %ebp, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ addl %ebx, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl %esi, %edi
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 24(%esp), %ecx # 4-byte Reload
+ addl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 72(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ imull 108(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %esi, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %esi, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl %edi, %edx
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 48(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 20(%eax), %ebp
+ movl %ebp, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ addl %ebx, %ebp
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %ebx
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl %edi, %edx
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 52(%esp), %edi # 4-byte Reload
+ addl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %esi
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, %ebx
+ imull 108(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ addl %edi, %eax
+ adcl %ebp, %ecx
+ movl %ecx, %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl 76(%esp), %ebx # 4-byte Reload
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl %esi, %edi
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 80(%esp) # 4-byte Spill
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 24(%eax), %edi
+ movl %edi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %edi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %ebp
+ movl %edi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ addl %ebp, %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ adcl %esi, %edi
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ addl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 104(%esp) # 4-byte Folded Spill
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ imull %ecx, %edi
+ movl %edi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %edi, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %edi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ addl %ecx, %ebp
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edx
+ adcl 72(%esp), %esi # 4-byte Folded Reload
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl 108(%esp), %ebx # 4-byte Reload
+ adcl 92(%esp), %ebx # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 60(%esp), %edx # 4-byte Folded Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 108(%esp) # 4-byte Spill
+ adcl 84(%esp), %ebp # 4-byte Folded Reload
+ adcl 88(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 112(%esp) # 4-byte Spill
+ movl %edx, %eax
+ subl 148(%esp), %eax # 4-byte Folded Reload
+ sbbl 128(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 128(%esp) # 4-byte Spill
+ sbbl 132(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ movl %edx, %esi
+ sbbl 136(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 136(%esp) # 4-byte Spill
+ sbbl 140(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 148(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ movl %ebp, %ecx
+ movl %ebx, %ebp
+ sbbl 144(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, %ebx
+ sbbl 124(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %edi
+ sarl $31, %edi
+ testl %edi, %edi
+ js .LBB99_2
+# BB#1:
+ movl %eax, %esi
+.LBB99_2:
+ movl 172(%esp), %edx
+ movl %esi, (%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ js .LBB99_4
+# BB#3:
+ movl 128(%esp), %eax # 4-byte Reload
+.LBB99_4:
+ movl %eax, 4(%edx)
+ movl %ecx, %eax
+ movl 116(%esp), %ecx # 4-byte Reload
+ js .LBB99_6
+# BB#5:
+ movl 132(%esp), %ecx # 4-byte Reload
+.LBB99_6:
+ movl %ecx, 8(%edx)
+ movl 108(%esp), %esi # 4-byte Reload
+ movl 120(%esp), %ecx # 4-byte Reload
+ js .LBB99_8
+# BB#7:
+ movl 136(%esp), %ecx # 4-byte Reload
+.LBB99_8:
+ movl %ecx, 12(%edx)
+ js .LBB99_10
+# BB#9:
+ movl 148(%esp), %esi # 4-byte Reload
+.LBB99_10:
+ movl %esi, 16(%edx)
+ js .LBB99_12
+# BB#11:
+ movl %ebp, %eax
+.LBB99_12:
+ movl %eax, 20(%edx)
+ js .LBB99_14
+# BB#13:
+ movl %ebx, 112(%esp) # 4-byte Spill
+.LBB99_14:
+ movl 112(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%edx)
+ addl $152, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end99:
+ .size mcl_fp_montNF7L, .Lfunc_end99-mcl_fp_montNF7L
+
+ .globl mcl_fp_montRed7L
+ .align 16, 0x90
+ .type mcl_fp_montRed7L,@function
+mcl_fp_montRed7L: # @mcl_fp_montRed7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $124, %esp
+ movl 152(%esp), %eax
+ movl -4(%eax), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl (%eax), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx
+ movl (%ecx), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ imull %edx, %ecx
+ movl 24(%eax), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 20(%eax), %edx
+ movl %edx, 120(%esp) # 4-byte Spill
+ movl 16(%eax), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 12(%eax), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 8(%eax), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 4(%eax), %ebx
+ movl %ebx, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ addl %ebx, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ adcl 80(%esp), %ebp # 4-byte Folded Reload
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 84(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl %edi, %esi
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 116(%esp), %eax # 4-byte Folded Reload
+ movl 148(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 4(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 8(%ecx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 12(%ecx), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 16(%ecx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ adcl 20(%ecx), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ adcl 24(%ecx), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 28(%ecx), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl 48(%ecx), %edi
+ movl 44(%ecx), %edx
+ movl 40(%ecx), %ebx
+ movl 36(%ecx), %ebp
+ movl 32(%ecx), %eax
+ adcl $0, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ imull 88(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %eax, %ecx
+ addl %ebx, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl %ebp, %edi
+ movl %esi, %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 72(%esp), %ecx # 4-byte Folded Reload
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ imull 88(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %ebx
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %esi
+ movl %edi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %eax, %edi
+ addl %esi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl %ebx, %ebp
+ movl %ebp, %eax
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl 60(%esp), %ebx # 4-byte Reload
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 28(%esp), %edi # 4-byte Folded Reload
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 64(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl %edi, %esi
+ imull 88(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %esi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, %ebp
+ movl %esi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %eax, %edi
+ addl %ebp, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl %ecx, %ebx
+ movl %ebx, %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 32(%esp), %edi # 4-byte Folded Reload
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl %edi, %ebp
+ imull 88(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ecx
+ movl %ebp, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ addl %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl %ecx, %edi
+ movl %edi, %ecx
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 28(%esp), %ebp # 4-byte Folded Reload
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl %ebp, %edi
+ imull 88(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %edi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ addl %ebx, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl 32(%esp), %ebp # 4-byte Folded Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ imull %ebp, %ecx
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ addl %edi, %eax
+ movl %eax, %edi
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 80(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 80(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl 28(%esp), %ebx # 4-byte Reload
+ addl 44(%esp), %ebx # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 80(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %eax
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 116(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
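+# NOTE (inferred from the code): end of the reduction rounds of mcl_fp_montRed7L.
+# The code below subtracts the modulus once and uses the borrow saved in 116(%esp)
+# to choose, limb by limb, between the reduced and unreduced result before storing it.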
+ movl %edi, %edx
+ movl %edx, %ecx
+ subl 112(%esp), %ecx # 4-byte Folded Reload
+ sbbl 96(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ sbbl 100(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 96(%esp) # 4-byte Spill
+ sbbl 108(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ sbbl 104(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %esi, %ebp
+ sbbl 120(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ movl %esi, %eax
+ sbbl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 120(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ movl %ebx, 116(%esp) # 4-byte Spill
+ jne .LBB100_2
+# BB#1:
+ movl %ecx, %edx
+.LBB100_2:
+ movl 144(%esp), %edi
+ movl %edx, (%edi)
+ movl 116(%esp), %eax # 4-byte Reload
+ testb %al, %al
+ movl 64(%esp), %eax # 4-byte Reload
+ jne .LBB100_4
+# BB#3:
+ movl 84(%esp), %eax # 4-byte Reload
+.LBB100_4:
+ movl %eax, 4(%edi)
+ movl 80(%esp), %eax # 4-byte Reload
+ jne .LBB100_6
+# BB#5:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB100_6:
+ movl %eax, 8(%edi)
+ movl 88(%esp), %eax # 4-byte Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB100_8
+# BB#7:
+ movl 100(%esp), %ecx # 4-byte Reload
+.LBB100_8:
+ movl %ecx, 12(%edi)
+ jne .LBB100_10
+# BB#9:
+ movl 108(%esp), %eax # 4-byte Reload
+.LBB100_10:
+ movl %eax, 16(%edi)
+ jne .LBB100_12
+# BB#11:
+ movl 112(%esp), %ebp # 4-byte Reload
+.LBB100_12:
+ movl %ebp, 20(%edi)
+ jne .LBB100_14
+# BB#13:
+ movl 120(%esp), %esi # 4-byte Reload
+.LBB100_14:
+ movl %esi, 24(%edi)
+ addl $124, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end100:
+ .size mcl_fp_montRed7L, .Lfunc_end100-mcl_fp_montRed7L
+
+ .globl mcl_fp_addPre7L
+ .align 16, 0x90
+ .type mcl_fp_addPre7L,@function
+mcl_fp_addPre7L: # @mcl_fp_addPre7L
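+# NOTE (inferred from the code): raw 7-limb (7 x 32-bit) addition of the two
+# source operands with no modular reduction; the carry out is returned in %eax.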
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 20(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %edi
+ adcl 8(%esi), %edi
+ movl 16(%esp), %ebx
+ movl %ecx, (%ebx)
+ movl 12(%esi), %ecx
+ movl %edx, 4(%ebx)
+ movl 16(%esi), %edx
+ adcl 12(%eax), %ecx
+ adcl 16(%eax), %edx
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %ecx, 12(%ebx)
+ movl 20(%esi), %ecx
+ adcl %edi, %ecx
+ movl %edx, 16(%ebx)
+ movl %ecx, 20(%ebx)
+ movl 24(%eax), %eax
+ movl 24(%esi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 24(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end101:
+ .size mcl_fp_addPre7L, .Lfunc_end101-mcl_fp_addPre7L
+
+ .globl mcl_fp_subPre7L
+ .align 16, 0x90
+ .type mcl_fp_subPre7L,@function
+mcl_fp_subPre7L: # @mcl_fp_subPre7L
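+# NOTE (inferred from the code): raw 7-limb subtraction with no modular
+# correction; the final borrow is returned in %eax.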
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ xorl %eax, %eax
+ movl 28(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edi), %ebx
+ movl 20(%esp), %ebp
+ movl %edx, (%ebp)
+ movl 12(%ecx), %edx
+ sbbl 12(%edi), %edx
+ movl %esi, 4(%ebp)
+ movl 16(%ecx), %esi
+ sbbl 16(%edi), %esi
+ movl %ebx, 8(%ebp)
+ movl 20(%edi), %ebx
+ movl %edx, 12(%ebp)
+ movl 20(%ecx), %edx
+ sbbl %ebx, %edx
+ movl %esi, 16(%ebp)
+ movl %edx, 20(%ebp)
+ movl 24(%edi), %edx
+ movl 24(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 24(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end102:
+ .size mcl_fp_subPre7L, .Lfunc_end102-mcl_fp_subPre7L
+
+ .globl mcl_fp_shr1_7L
+ .align 16, 0x90
+ .type mcl_fp_shr1_7L,@function
+mcl_fp_shr1_7L: # @mcl_fp_shr1_7L
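+# NOTE (inferred from the code): logical right shift by one bit of a 7-limb
+# value, propagating bits between adjacent limbs with shrdl.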
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 20(%esi)
+ shrl %eax
+ movl %eax, 24(%esi)
+ popl %esi
+ retl
+.Lfunc_end103:
+ .size mcl_fp_shr1_7L, .Lfunc_end103-mcl_fp_shr1_7L
+
+ .globl mcl_fp_add7L
+ .align 16, 0x90
+ .type mcl_fp_add7L,@function
+mcl_fp_add7L: # @mcl_fp_add7L
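+# NOTE (inferred from the code): modular addition. The 7-limb sum is stored,
+# a trial subtraction of the modulus follows, and the reduced value overwrites
+# the sum only when that subtraction does not borrow (the %nocarry path).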
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %ebp
+ movl (%ebp), %eax
+ movl 4(%ebp), %edi
+ movl 44(%esp), %ecx
+ addl (%ecx), %eax
+ adcl 4(%ecx), %edi
+ movl 8(%ebp), %esi
+ adcl 8(%ecx), %esi
+ movl 12(%ecx), %edx
+ movl 16(%ecx), %ebx
+ adcl 12(%ebp), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 16(%ebp), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ movl 20(%ecx), %ebp
+ adcl 20(%ebx), %ebp
+ movl 24(%ecx), %edx
+ adcl 24(%ebx), %edx
+ movl 40(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, 4(%ecx)
+ movl %esi, 8(%ecx)
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%ecx)
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%ecx)
+ movl %ebp, 20(%ecx)
+ movl %edx, 24(%ecx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 52(%esp), %ecx
+ subl (%ecx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl 52(%esp), %eax
+ sbbl 4(%eax), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %eax, %edi
+ sbbl 8(%edi), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 12(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ecx
+ movl %ecx, %esi
+ sbbl 20(%edi), %ebp
+ sbbl 24(%edi), %edx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB104_2
+# BB#1: # %nocarry
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl 40(%esp), %eax
+ movl %eax, %ebx
+ movl %ecx, (%ebx)
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%ebx)
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebx)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebx)
+ movl %esi, 16(%ebx)
+ movl %ebp, 20(%ebx)
+ movl %edx, 24(%ebx)
+.LBB104_2: # %carry
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end104:
+ .size mcl_fp_add7L, .Lfunc_end104-mcl_fp_add7L
+
+ .globl mcl_fp_addNF7L
+ .align 16, 0x90
+ .type mcl_fp_addNF7L,@function
+mcl_fp_addNF7L: # @mcl_fp_addNF7L
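+# NOTE (inferred from the code): addition variant. After the add, the modulus
+# is subtracted and the sign of the trial result selects, limb by limb,
+# between the reduced and unreduced values.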
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl 80(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 76(%esp), %esi
+ addl (%esi), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 4(%esi), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 20(%eax), %ebx
+ movl 16(%eax), %edi
+ movl 12(%eax), %ebp
+ movl 8(%eax), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 12(%esi), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 24(%esi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl 44(%esp), %esi # 4-byte Reload
+ subl (%eax), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ sbbl 4(%eax), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 8(%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%eax), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ sbbl 16(%eax), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ sbbl 20(%eax), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ sbbl 24(%eax), %edi
+ movl %edi, %ecx
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ js .LBB105_2
+# BB#1:
+ movl (%esp), %esi # 4-byte Reload
+.LBB105_2:
+ movl 72(%esp), %ecx
+ movl %esi, (%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ js .LBB105_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB105_4:
+ movl %eax, 4(%ecx)
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ movl 32(%esp), %esi # 4-byte Reload
+ movl 24(%esp), %ebx # 4-byte Reload
+ js .LBB105_6
+# BB#5:
+ movl 8(%esp), %ebx # 4-byte Reload
+.LBB105_6:
+ movl 72(%esp), %eax
+ movl %ebx, 8(%eax)
+ movl %eax, %ebx
+ js .LBB105_8
+# BB#7:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB105_8:
+ movl %esi, 12(%ebx)
+ js .LBB105_10
+# BB#9:
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB105_10:
+ movl %edx, 16(%ebx)
+ js .LBB105_12
+# BB#11:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB105_12:
+ movl %ecx, 20(%ebx)
+ js .LBB105_14
+# BB#13:
+ movl %edi, %ebp
+.LBB105_14:
+ movl %ebp, 24(%ebx)
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end105:
+ .size mcl_fp_addNF7L, .Lfunc_end105-mcl_fp_addNF7L
+
+ .globl mcl_fp_sub7L
+ .align 16, 0x90
+ .type mcl_fp_sub7L,@function
+mcl_fp_sub7L: # @mcl_fp_sub7L
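+# NOTE (inferred from the code): modular subtraction. The raw difference is
+# stored first; if the subtraction borrowed, the modulus is added back in the
+# %carry path.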
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 48(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ xorl %ebx, %ebx
+ movl 52(%esp), %esi
+ subl (%esi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 4(%esi), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edi), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 12(%edi), %ecx
+ sbbl 12(%esi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 16(%edi), %eax
+ sbbl 16(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 20(%edi), %ebp
+ sbbl 20(%esi), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 24(%edi), %edi
+ sbbl 24(%esi), %edi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 44(%esp), %ebx
+ movl 16(%esp), %esi # 4-byte Reload
+ movl %esi, (%ebx)
+ movl 20(%esp), %esi # 4-byte Reload
+ movl %esi, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl %ecx, 12(%ebx)
+ movl %eax, 16(%ebx)
+ movl %ebp, 20(%ebx)
+ movl %edi, 24(%ebx)
+ je .LBB106_2
+# BB#1: # %carry
+ movl 56(%esp), %ebp
+ movl 16(%esp), %ecx # 4-byte Reload
+ addl (%ebp), %ecx
+ movl %ecx, (%ebx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 4(%ebp), %edx
+ movl %edx, 4(%ebx)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 8(%ebp), %ecx
+ movl 12(%ebp), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%ebp), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl %ecx, 16(%ebx)
+ movl 20(%ebp), %eax
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 24(%ebp), %eax
+ adcl %edi, %eax
+ movl %eax, 24(%ebx)
+.LBB106_2: # %nocarry
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end106:
+ .size mcl_fp_sub7L, .Lfunc_end106-mcl_fp_sub7L
+
+ .globl mcl_fp_subNF7L
+ .align 16, 0x90
+ .type mcl_fp_subNF7L,@function
+mcl_fp_subNF7L: # @mcl_fp_subNF7L
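+# NOTE (inferred from the code): subtraction variant. The borrow is turned
+# into an all-ones/all-zeros mask (sarl $31) that gates adding the modulus
+# back, so no per-limb branch is needed.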
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edx
+ movl 60(%esp), %ecx
+ subl (%ecx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ sbbl 4(%ecx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 24(%eax), %edx
+ movl 20(%eax), %esi
+ movl 16(%eax), %edi
+ movl 12(%eax), %ebx
+ movl 8(%eax), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ sbbl 24(%ecx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ sarl $31, %ecx
+ movl %ecx, %eax
+ shldl $1, %edx, %eax
+ movl 64(%esp), %edx
+ andl (%edx), %eax
+ movl 24(%edx), %esi
+ andl %ecx, %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 20(%edx), %ebx
+ andl %ecx, %ebx
+ movl 16(%edx), %edi
+ andl %ecx, %edi
+ movl 12(%edx), %esi
+ andl %ecx, %esi
+ movl 64(%esp), %edx
+ movl 8(%edx), %edx
+ andl %ecx, %edx
+ movl 64(%esp), %ebp
+ andl 4(%ebp), %ecx
+ addl 20(%esp), %eax # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ebp
+ movl %eax, (%ebp)
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, %eax
+ movl %ecx, 4(%eax)
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 8(%eax)
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 12(%eax)
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 16(%eax)
+ movl %ebx, 20(%eax)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%eax)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end107:
+ .size mcl_fp_subNF7L, .Lfunc_end107-mcl_fp_subNF7L
+
+ .globl mcl_fpDbl_add7L
+ .align 16, 0x90
+ .type mcl_fpDbl_add7L,@function
+mcl_fpDbl_add7L: # @mcl_fpDbl_add7L
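+# NOTE (inferred from the code): double-width (14-limb) addition. The low
+# 7 limbs are stored directly, and the high half is conditionally reduced by a
+# trial subtraction of the modulus.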
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl 72(%esp), %esi
+ movl 68(%esp), %edx
+ movl 12(%edx), %edi
+ movl 16(%edx), %ecx
+ movl 8(%esi), %eax
+ movl (%esi), %ebx
+ addl (%edx), %ebx
+ movl 64(%esp), %ebp
+ movl %ebx, (%ebp)
+ movl 4(%esi), %ebx
+ adcl 4(%edx), %ebx
+ adcl 8(%edx), %eax
+ adcl 12(%esi), %edi
+ adcl 16(%esi), %ecx
+ movl %ebx, 4(%ebp)
+ movl %esi, %ebx
+ movl 36(%ebx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl %eax, 8(%ebp)
+ movl 20(%ebx), %eax
+ movl %edi, 12(%ebp)
+ movl 20(%edx), %edi
+ adcl %eax, %edi
+ movl 24(%ebx), %eax
+ movl %ecx, 16(%ebp)
+ movl 24(%edx), %ecx
+ adcl %eax, %ecx
+ movl 28(%ebx), %eax
+ movl %edi, 20(%ebp)
+ movl 28(%edx), %edi
+ adcl %eax, %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 32(%ebx), %eax
+ movl %ecx, 24(%ebp)
+ movl 32(%edx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%edx), %esi
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl 40(%ebx), %ecx
+ movl 40(%edx), %eax
+ adcl %ecx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%ebx), %ebp
+ movl 44(%edx), %ecx
+ adcl %ebp, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 48(%ebx), %ebp
+ movl %ebx, %eax
+ movl 48(%edx), %ebx
+ adcl %ebp, %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 52(%eax), %eax
+ movl 52(%edx), %ebp
+ adcl %eax, %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 76(%esp), %eax
+ subl (%eax), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ sbbl 4(%eax), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ movl 76(%esp), %edi
+ sbbl 8(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ sbbl 12(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 20(%edi), %ebx
+ sbbl 24(%edi), %ebp
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB108_2
+# BB#1:
+ movl %ebp, 32(%esp) # 4-byte Spill
+.LBB108_2:
+ testb %dl, %dl
+ movl 20(%esp), %ecx # 4-byte Reload
+ jne .LBB108_4
+# BB#3:
+ movl (%esp), %esi # 4-byte Reload
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB108_4:
+ movl 64(%esp), %eax
+ movl %ecx, 28(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl %esi, 36(%eax)
+ movl 24(%esp), %edx # 4-byte Reload
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB108_6
+# BB#5:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB108_6:
+ movl %ecx, 40(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ jne .LBB108_8
+# BB#7:
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB108_8:
+ movl %edx, 44(%eax)
+ jne .LBB108_10
+# BB#9:
+ movl %ebx, %ecx
+.LBB108_10:
+ movl %ecx, 48(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end108:
+ .size mcl_fpDbl_add7L, .Lfunc_end108-mcl_fpDbl_add7L
+
+ .globl mcl_fpDbl_sub7L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub7L,@function
+mcl_fpDbl_sub7L: # @mcl_fpDbl_sub7L
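+# NOTE (inferred from the code): double-width (14-limb) subtraction. The low
+# 7 limbs are stored directly, and the modulus is added back into the high
+# half when the overall subtraction borrowed.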
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %edx
+ movl 60(%esp), %edi
+ subl (%edi), %eax
+ sbbl 4(%edi), %edx
+ movl 8(%esi), %ebx
+ sbbl 8(%edi), %ebx
+ movl 52(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%esi), %edx
+ sbbl 16(%edi), %edx
+ movl %ebx, 8(%ecx)
+ movl 20(%edi), %ebx
+ movl %eax, 12(%ecx)
+ movl 20(%esi), %eax
+ sbbl %ebx, %eax
+ movl 24(%edi), %ebx
+ movl %edx, 16(%ecx)
+ movl 24(%esi), %edx
+ sbbl %ebx, %edx
+ movl 28(%edi), %ebx
+ movl %eax, 20(%ecx)
+ movl 28(%esi), %eax
+ sbbl %ebx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 32(%edi), %eax
+ movl %edx, 24(%ecx)
+ movl 32(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 36(%edi), %eax
+ movl 36(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 40(%edi), %eax
+ movl 40(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 44(%edi), %eax
+ movl 44(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%edi), %eax
+ movl 48(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 52(%edi), %eax
+ movl 52(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 64(%esp), %esi
+ jne .LBB109_1
+# BB#2:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB109_3
+.LBB109_1:
+ movl 24(%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+.LBB109_3:
+ testb %al, %al
+ jne .LBB109_4
+# BB#5:
+ movl $0, %edi
+ movl $0, %eax
+ jmp .LBB109_6
+.LBB109_4:
+ movl (%esi), %eax
+ movl 4(%esi), %edi
+.LBB109_6:
+ jne .LBB109_7
+# BB#8:
+ movl $0, %ebx
+ jmp .LBB109_9
+.LBB109_7:
+ movl 20(%esi), %ebx
+.LBB109_9:
+ jne .LBB109_10
+# BB#11:
+ movl $0, %ebp
+ jmp .LBB109_12
+.LBB109_10:
+ movl 16(%esi), %ebp
+.LBB109_12:
+ jne .LBB109_13
+# BB#14:
+ movl $0, %edx
+ jmp .LBB109_15
+.LBB109_13:
+ movl 12(%esi), %edx
+.LBB109_15:
+ jne .LBB109_16
+# BB#17:
+ xorl %esi, %esi
+ jmp .LBB109_18
+.LBB109_16:
+ movl 8(%esi), %esi
+.LBB109_18:
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 32(%ecx)
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 36(%ecx)
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, 40(%ecx)
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 44(%ecx)
+ movl %ebx, 48(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end109:
+ .size mcl_fpDbl_sub7L, .Lfunc_end109-mcl_fpDbl_sub7L
+
+ .align 16, 0x90
+ .type .LmulPv256x32,@function
+.LmulPv256x32: # @mulPv256x32
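+# NOTE (inferred from the code): local helper that multiplies the 8-limb
+# (256-bit) value pointed to by %edx by a 32-bit scalar taken from the stack,
+# writing the 9-limb product to the buffer in %ecx and returning that pointer
+# in %eax.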
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $48, %esp
+ movl %edx, %esi
+ movl 68(%esp), %ebx
+ movl %ebx, %eax
+ mull 28(%esi)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 24(%esi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 20(%esi)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 16(%esi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 12(%esi)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 8(%esi)
+ movl %edx, %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 4(%esi)
+ movl %edx, %edi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull (%esi)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%ecx)
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 32(%ecx)
+ movl %ecx, %eax
+ addl $48, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end110:
+ .size .LmulPv256x32, .Lfunc_end110-.LmulPv256x32
+
+ .globl mcl_fp_mulUnitPre8L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre8L,@function
+mcl_fp_mulUnitPre8L: # @mcl_fp_mulUnitPre8L
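+# NOTE (inferred from the code): multiplies an 8-limb operand by a 32-bit
+# scalar via .LmulPv256x32 and copies the 9-limb result to the output.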
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ calll .L111$pb
+.L111$pb:
+ popl %ebx
+.Ltmp2:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp2-.L111$pb), %ebx
+ movl 88(%esp), %eax
+ movl %eax, (%esp)
+ leal 24(%esp), %ecx
+ movl 84(%esp), %edx
+ calll .LmulPv256x32
+ movl 56(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 48(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %esi
+ movl 40(%esp), %edi
+ movl 36(%esp), %ebx
+ movl 32(%esp), %ebp
+ movl 24(%esp), %edx
+ movl 28(%esp), %ecx
+ movl 80(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %ebp, 8(%eax)
+ movl %ebx, 12(%eax)
+ movl %edi, 16(%eax)
+ movl %esi, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end111:
+ .size mcl_fp_mulUnitPre8L, .Lfunc_end111-mcl_fp_mulUnitPre8L
+
+ .globl mcl_fpDbl_mulPre8L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre8L,@function
+mcl_fpDbl_mulPre8L: # @mcl_fpDbl_mulPre8L
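+# NOTE (inferred from the code): full 8-limb x 8-limb multiplication. The
+# operands are split into 4-limb halves, mcl_fpDbl_mulPre4L is called three
+# times (low halves, high halves, and the sums of the halves), and the partial
+# products are recombined Karatsuba-style.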
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $156, %esp
+ calll .L112$pb
+.L112$pb:
+ popl %ebx
+.Ltmp3:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp3-.L112$pb), %ebx
+ movl %ebx, -96(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl %esi, 8(%esp)
+ movl 12(%ebp), %edi
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre4L@PLT
+ leal 16(%esi), %eax
+ movl %eax, 8(%esp)
+ leal 16(%edi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 32(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre4L@PLT
+ movl 24(%edi), %esi
+ movl (%edi), %ebx
+ movl 4(%edi), %eax
+ addl 16(%edi), %ebx
+ movl %ebx, -120(%ebp) # 4-byte Spill
+ adcl 20(%edi), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ adcl 8(%edi), %esi
+ movl %esi, -108(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -80(%ebp) # 4-byte Spill
+ movl 16(%ebp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ addl 16(%edi), %eax
+ adcl 20(%edi), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ movl 24(%edi), %edx
+ adcl 8(%edi), %edx
+ movl 28(%edi), %ecx
+ adcl 12(%edi), %ecx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -128(%ebp) # 4-byte Spill
+ jb .LBB112_2
+# BB#1:
+ xorl %esi, %esi
+ xorl %ebx, %ebx
+.LBB112_2:
+ movl %ebx, -112(%ebp) # 4-byte Spill
+ movl %esi, -104(%ebp) # 4-byte Spill
+ movl 12(%ebp), %esi
+ movl 28(%esi), %edi
+ movl -80(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 12(%esi), %edi
+ movl %edi, -116(%ebp) # 4-byte Spill
+ movl %ecx, -84(%ebp) # 4-byte Spill
+ movl %edx, %edi
+ movl -124(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -80(%ebp) # 4-byte Spill
+ movl %eax, -92(%ebp) # 4-byte Spill
+ jb .LBB112_4
+# BB#3:
+ movl $0, -84(%ebp) # 4-byte Folded Spill
+ movl $0, %edi
+ movl $0, -80(%ebp) # 4-byte Folded Spill
+ movl $0, -92(%ebp) # 4-byte Folded Spill
+.LBB112_4:
+ movl %edi, -88(%ebp) # 4-byte Spill
+ movl -120(%ebp), %esi # 4-byte Reload
+ movl %esi, -60(%ebp)
+ movl -100(%ebp), %edi # 4-byte Reload
+ movl %edi, -56(%ebp)
+ movl -108(%ebp), %esi # 4-byte Reload
+ movl %esi, -52(%ebp)
+ movl %eax, -76(%ebp)
+ movl %ebx, -72(%ebp)
+ movl %edx, -68(%ebp)
+ movl %ecx, -64(%ebp)
+ sbbl %edx, %edx
+ movl -116(%ebp), %esi # 4-byte Reload
+ movl %esi, -48(%ebp)
+ movl -128(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB112_6
+# BB#5:
+ movl $0, %esi
+ movl $0, %edi
+.LBB112_6:
+ sbbl %eax, %eax
+ leal -76(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -60(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -44(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl %edi, %eax
+ movl -92(%ebp), %edi # 4-byte Reload
+ addl -112(%ebp), %edi # 4-byte Folded Reload
+ adcl %eax, -80(%ebp) # 4-byte Folded Spill
+ movl -104(%ebp), %eax # 4-byte Reload
+ adcl %eax, -88(%ebp) # 4-byte Folded Spill
+ adcl %esi, -84(%ebp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -92(%ebp) # 4-byte Spill
+ movl -96(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre4L@PLT
+ addl -28(%ebp), %edi
+ movl -80(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -80(%ebp) # 4-byte Spill
+ movl -88(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -88(%ebp) # 4-byte Spill
+ movl -84(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -84(%ebp) # 4-byte Spill
+ adcl %esi, -92(%ebp) # 4-byte Folded Spill
+ movl -44(%ebp), %eax
+ movl 8(%ebp), %esi
+ subl (%esi), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ebx
+ sbbl 4(%esi), %ebx
+ movl -36(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl -32(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl 16(%esi), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl 20(%esi), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ sbbl %eax, -80(%ebp) # 4-byte Folded Spill
+ movl 24(%esi), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ sbbl %eax, -88(%ebp) # 4-byte Folded Spill
+ movl 28(%esi), %eax
+ movl %eax, -108(%ebp) # 4-byte Spill
+ sbbl %eax, -84(%ebp) # 4-byte Folded Spill
+ sbbl $0, -92(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %ecx
+ movl %ecx, -132(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ subl %ecx, %eax
+ movl 36(%esi), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 40(%esi), %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 48(%esi), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 52(%esi), %ecx
+ movl %ecx, -116(%ebp) # 4-byte Spill
+ sbbl %ecx, -80(%ebp) # 4-byte Folded Spill
+ movl 56(%esi), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ sbbl %ecx, -88(%ebp) # 4-byte Folded Spill
+ movl 60(%esi), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ sbbl %ecx, -84(%ebp) # 4-byte Folded Spill
+ sbbl $0, -92(%ebp) # 4-byte Folded Spill
+ addl -100(%ebp), %eax # 4-byte Folded Reload
+ adcl -112(%ebp), %ebx # 4-byte Folded Reload
+ movl %eax, 16(%esi)
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -104(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%esi)
+ adcl -108(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 24(%esi)
+ adcl -132(%ebp), %edi # 4-byte Folded Reload
+ movl %edx, 28(%esi)
+ movl -80(%ebp), %eax # 4-byte Reload
+ adcl -136(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 32(%esi)
+ movl -88(%ebp), %ecx # 4-byte Reload
+ adcl -128(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ movl -84(%ebp), %eax # 4-byte Reload
+ adcl -140(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%esi)
+ movl -92(%ebp), %ecx # 4-byte Reload
+ adcl -144(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%esi)
+ movl %ecx, 48(%esi)
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 52(%esi)
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 56(%esi)
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 60(%esi)
+ addl $156, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end112:
+ .size mcl_fpDbl_mulPre8L, .Lfunc_end112-mcl_fpDbl_mulPre8L
+
+ .globl mcl_fpDbl_sqrPre8L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre8L,@function
+mcl_fpDbl_sqrPre8L: # @mcl_fpDbl_sqrPre8L
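+# NOTE (inferred from the code): squaring counterpart of mcl_fpDbl_mulPre8L,
+# using the same three-multiplication split with both pointer arguments of
+# mcl_fpDbl_mulPre4L aliased to the single input.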
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $156, %esp
+ calll .L113$pb
+.L113$pb:
+ popl %ebx
+.Ltmp4:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp4-.L113$pb), %ebx
+ movl %ebx, -96(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre4L@PLT
+ leal 16(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 32(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre4L@PLT
+ movl (%edi), %esi
+ movl 4(%edi), %ecx
+ addl 16(%edi), %esi
+ movl %esi, -108(%ebp) # 4-byte Spill
+ adcl 20(%edi), %ecx
+ seto %al
+ lahf
+ movl %eax, %edx
+ addl %esi, %esi
+ movl %esi, -84(%ebp) # 4-byte Spill
+ movl %ecx, %esi
+ adcl %esi, %esi
+ movl %esi, -80(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %esi
+ popl %eax
+ movl %esi, -88(%ebp) # 4-byte Spill
+ movl 24(%edi), %esi
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 8(%edi), %esi
+ movl 28(%edi), %edx
+ adcl 12(%edi), %edx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -100(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %ebx
+ sbbl %edi, %edi
+ movl %edi, -92(%ebp) # 4-byte Spill
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB113_2
+# BB#1:
+ movl $0, -80(%ebp) # 4-byte Folded Spill
+ movl $0, -84(%ebp) # 4-byte Folded Spill
+.LBB113_2:
+ movl %esi, %ebx
+ movl -88(%ebp), %edi # 4-byte Reload
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ adcl %ebx, %ebx
+ movl %edx, %edi
+ adcl %edi, %edi
+ movl -104(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB113_4
+# BB#3:
+ xorl %edi, %edi
+ xorl %ebx, %ebx
+.LBB113_4:
+ movl %ebx, -88(%ebp) # 4-byte Spill
+ movl -108(%ebp), %eax # 4-byte Reload
+ movl %eax, -60(%ebp)
+ movl %ecx, -56(%ebp)
+ movl %esi, -52(%ebp)
+ movl %edx, -48(%ebp)
+ movl %eax, -76(%ebp)
+ movl %ecx, -72(%ebp)
+ movl %esi, -68(%ebp)
+ movl %edx, -64(%ebp)
+ movl -100(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB113_5
+# BB#6:
+ movl $0, -100(%ebp) # 4-byte Folded Spill
+ jmp .LBB113_7
+.LBB113_5:
+ shrl $31, %edx
+ movl %edx, -100(%ebp) # 4-byte Spill
+.LBB113_7:
+ leal -76(%ebp), %eax
+ movl %eax, 8(%esp)
+ leal -60(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -44(%ebp), %eax
+ movl %eax, (%esp)
+ movl -92(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -96(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre4L@PLT
+ movl -84(%ebp), %eax # 4-byte Reload
+ addl -28(%ebp), %eax
+ movl %eax, -84(%ebp) # 4-byte Spill
+ movl -80(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -80(%ebp) # 4-byte Spill
+ movl -88(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -88(%ebp) # 4-byte Spill
+ adcl -16(%ebp), %edi
+ movl %edi, -92(%ebp) # 4-byte Spill
+ adcl -100(%ebp), %esi # 4-byte Folded Reload
+ movl -44(%ebp), %eax
+ movl 8(%ebp), %edi
+ subl (%edi), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ebx
+ sbbl 4(%edi), %ebx
+ movl -36(%ebp), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl -32(%ebp), %edx
+ sbbl 12(%edi), %edx
+ movl 16(%edi), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ sbbl %eax, -84(%ebp) # 4-byte Folded Spill
+ movl 20(%edi), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ sbbl %eax, -80(%ebp) # 4-byte Folded Spill
+ movl 24(%edi), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ sbbl %eax, -88(%ebp) # 4-byte Folded Spill
+ movl 28(%edi), %eax
+ movl %eax, -108(%ebp) # 4-byte Spill
+ sbbl %eax, -92(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ movl 32(%edi), %ecx
+ movl %ecx, -132(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ subl %ecx, %eax
+ movl 36(%edi), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 40(%edi), %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 44(%edi), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 48(%edi), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ sbbl %ecx, -84(%ebp) # 4-byte Folded Spill
+ movl 52(%edi), %ecx
+ movl %ecx, -116(%ebp) # 4-byte Spill
+ sbbl %ecx, -80(%ebp) # 4-byte Folded Spill
+ movl 56(%edi), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ sbbl %ecx, -88(%ebp) # 4-byte Folded Spill
+ movl 60(%edi), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ sbbl %ecx, -92(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ addl -100(%ebp), %eax # 4-byte Folded Reload
+ adcl -112(%ebp), %ebx # 4-byte Folded Reload
+ movl %eax, 16(%edi)
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -104(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%edi)
+ adcl -108(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 24(%edi)
+ movl -84(%ebp), %eax # 4-byte Reload
+ adcl -132(%ebp), %eax # 4-byte Folded Reload
+ movl %edx, 28(%edi)
+ movl -80(%ebp), %ecx # 4-byte Reload
+ adcl -136(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%edi)
+ movl -88(%ebp), %eax # 4-byte Reload
+ adcl -128(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%edi)
+ movl -92(%ebp), %ecx # 4-byte Reload
+ adcl -140(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%edi)
+ adcl -144(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 44(%edi)
+ movl %esi, 48(%edi)
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 52(%edi)
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 56(%edi)
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 60(%edi)
+ addl $156, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end113:
+ .size mcl_fpDbl_sqrPre8L, .Lfunc_end113-mcl_fpDbl_sqrPre8L
+
+ .globl mcl_fp_mont8L
+ .align 16, 0x90
+ .type mcl_fp_mont8L,@function
+mcl_fp_mont8L: # @mcl_fp_mont8L
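+# NOTE (inferred from the code): 8-limb Montgomery multiplication. For each
+# limb of the multiplier, .LmulPv256x32 accumulates a partial product, then a
+# second call folds in the modulus scaled by the word derived from the
+# constant loaded at -4(modulus pointer); a final conditional subtraction
+# produces the reduced result.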
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $700, %esp # imm = 0x2BC
+ calll .L114$pb
+.L114$pb:
+ popl %ebx
+.Ltmp5:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp5-.L114$pb), %ebx
+ movl 732(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 664(%esp), %ebp
+ movl 668(%esp), %edi
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 696(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 688(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 684(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 676(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 672(%esp), %esi
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 624(%esp), %ebp
+ adcl 628(%esp), %edi
+ adcl 632(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ sbbl %eax, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 584(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 60(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 584(%esp), %edi
+ adcl 588(%esp), %esi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 592(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 596(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 600(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 604(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 608(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 612(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 616(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %edi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 732(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ andl $1, %ebp
+ addl 544(%esp), %edi
+ adcl 548(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 568(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 728(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ addl 504(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 524(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 536(%esp), %ebp
+ sbbl %edi, %edi
+ movl %esi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 464(%esp), %esi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 480(%esp), %edi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 484(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 488(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 496(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 724(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 424(%esp), %ecx
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 428(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 436(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 444(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 448(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 384(%esp), %esi
+ adcl 388(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 392(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 396(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 400(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 404(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 408(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 412(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 416(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 28(%esp), %ecx # 4-byte Reload
+ addl 344(%esp), %ecx
+ adcl 348(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 352(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 364(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 372(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %ebp, %eax
+ andl $1, %eax
+ addl 304(%esp), %edi
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 308(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 312(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 316(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 328(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 336(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 264(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 272(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 276(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 284(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 224(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 228(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 232(%esp), %esi
+ adcl 236(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 240(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 244(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 248(%esp), %ebp
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 256(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 184(%esp), %ecx
+ adcl 188(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 196(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 204(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 144(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 152(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 160(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 172(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 176(%esp), %ebp
+ adcl $0, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 104(%esp), %ecx
+ adcl 108(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 116(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 128(%esp), %edi
+ adcl 132(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 24(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %ebp
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ andl $1, %esi
+ addl 64(%esp), %ebp
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebx
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 88(%esp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %eax, %edx
+ movl 732(%esp), %ebp
+ subl (%ebp), %edx
+ movl %ecx, %eax
+ sbbl 4(%ebp), %eax
+ movl %ebx, %ecx
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ sbbl 20(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ sbbl $0, %esi
+ andl $1, %esi
+ movl %esi, %ecx
+ jne .LBB114_2
+# BB#1:
+ movl %edx, %ebp
+.LBB114_2:
+ movl 720(%esp), %edx
+ movl %ebp, (%edx)
+ testb %cl, %cl
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB114_4
+# BB#3:
+ movl %eax, %ebp
+.LBB114_4:
+ movl %ebp, 4(%edx)
+ jne .LBB114_6
+# BB#5:
+ movl 12(%esp), %ebx # 4-byte Reload
+.LBB114_6:
+ movl %ebx, 8(%edx)
+ movl 28(%esp), %eax # 4-byte Reload
+ jne .LBB114_8
+# BB#7:
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+.LBB114_8:
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%edx)
+ movl 40(%esp), %edi # 4-byte Reload
+ jne .LBB114_10
+# BB#9:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB114_10:
+ movl %edi, 16(%edx)
+ jne .LBB114_12
+# BB#11:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB114_12:
+ movl %eax, 20(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ jne .LBB114_14
+# BB#13:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB114_14:
+ movl %eax, 24(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ jne .LBB114_16
+# BB#15:
+ movl 52(%esp), %eax # 4-byte Reload
+.LBB114_16:
+ movl %eax, 28(%edx)
+ addl $700, %esp # imm = 0x2BC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end114:
+ .size mcl_fp_mont8L, .Lfunc_end114-mcl_fp_mont8L
+
+ .globl mcl_fp_montNF8L
+ .align 16, 0x90
+ .type mcl_fp_montNF8L,@function
+mcl_fp_montNF8L: # @mcl_fp_montNF8L
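+# NOTE (inferred from the code): variant of mcl_fp_mont8L with the same
+# per-limb multiply-and-fold loop; the NF variant applies the final correction
+# against the modulus differently.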
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $700, %esp # imm = 0x2BC
+ calll .L115$pb
+.L115$pb:
+ popl %ebx
+.Ltmp6:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp6-.L115$pb), %ebx
+ movl 732(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 664(%esp), %ebp
+ movl 668(%esp), %edi
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 696(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 688(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 684(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 676(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 672(%esp), %esi
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 624(%esp), %ebp
+ adcl 628(%esp), %edi
+ adcl 632(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 640(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 584(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 616(%esp), %ecx
+ addl 584(%esp), %edi
+ adcl 588(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 596(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 604(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 732(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ addl 544(%esp), %edi
+ adcl 548(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 564(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 572(%esp), %edi
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 576(%esp), %ebp
+ movl 728(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 536(%esp), %ecx
+ addl 504(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 528(%esp), %edi
+ adcl 532(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 464(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 472(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 484(%esp), %esi
+ adcl 488(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 496(%esp), %edi
+ movl 728(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 456(%esp), %eax
+ movl 44(%esp), %edx # 4-byte Reload
+ addl 424(%esp), %edx
+ adcl 428(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 432(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 436(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 440(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 448(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 452(%esp), %edi
+ movl %edi, %ebp
+ movl %eax, %edi
+ adcl $0, %edi
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 384(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 396(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 412(%esp), %ebp
+ adcl 416(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 376(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 344(%esp), %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 352(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 360(%esp), %esi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 364(%esp), %edi
+ adcl 368(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 304(%esp), %ebp
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 308(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 320(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 324(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 328(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 336(%esp), %edi
+ movl 728(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 724(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ movl 296(%esp), %edx
+ movl %ebp, %ecx
+ addl 264(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 280(%esp), %ebp
+ adcl 284(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl %edx, %edi
+ adcl $0, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 224(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 236(%esp), %esi
+ adcl 240(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 256(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 216(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 184(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 192(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 196(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 144(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 156(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 160(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 176(%esp), %ebp
+ movl 728(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 136(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 104(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 116(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ adcl $0, %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 64(%esp), %esi
+ movl 32(%esp), %esi # 4-byte Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 68(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 76(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 92(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 96(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 732(%esp), %eax
+ subl (%eax), %edx
+ sbbl 4(%eax), %ecx
+ sbbl 8(%eax), %esi
+ sbbl 12(%eax), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ sbbl 16(%eax), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %ebx # 4-byte Reload
+ sbbl 20(%eax), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ sbbl 24(%eax), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ sbbl 28(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ testl %edi, %edi
+ js .LBB115_2
+# BB#1:
+ movl %edx, 56(%esp) # 4-byte Spill
+.LBB115_2:
+ movl 720(%esp), %edx
+ movl 56(%esp), %eax # 4-byte Reload
+ movl %eax, (%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB115_4
+# BB#3:
+ movl %ecx, %eax
+.LBB115_4:
+ movl %eax, 4(%edx)
+ js .LBB115_6
+# BB#5:
+ movl %esi, 32(%esp) # 4-byte Spill
+.LBB115_6:
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%edx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ js .LBB115_8
+# BB#7:
+ movl 12(%esp), %esi # 4-byte Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+.LBB115_8:
+ movl 44(%esp), %esi # 4-byte Reload
+ movl %esi, 12(%edx)
+ js .LBB115_10
+# BB#9:
+ movl 16(%esp), %edi # 4-byte Reload
+.LBB115_10:
+ movl %edi, 16(%edx)
+ js .LBB115_12
+# BB#11:
+ movl 20(%esp), %ebp # 4-byte Reload
+.LBB115_12:
+ movl %ebp, 20(%edx)
+ js .LBB115_14
+# BB#13:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB115_14:
+ movl %eax, 24(%edx)
+ js .LBB115_16
+# BB#15:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB115_16:
+ movl %ecx, 28(%edx)
+ addl $700, %esp # imm = 0x2BC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end115:
+ .size mcl_fp_montNF8L, .Lfunc_end115-mcl_fp_montNF8L
+
+ .globl mcl_fp_montRed8L
+ .align 16, 0x90
+ .type mcl_fp_montRed8L,@function
+mcl_fp_montRed8L: # @mcl_fp_montRed8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $428, %esp # imm = 0x1AC
+ calll .L116$pb
+.L116$pb:
+ popl %ebx
+.Ltmp7:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp7-.L116$pb), %ebx
+ movl 456(%esp), %edx
+ movl -4(%edx), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl (%eax), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 4(%eax), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ imull %edi, %ecx
+ movl 60(%eax), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 56(%eax), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 52(%eax), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 48(%eax), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 44(%eax), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 40(%eax), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 36(%eax), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 32(%eax), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 24(%eax), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 20(%eax), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 16(%eax), %ebp
+ movl 12(%eax), %edi
+ movl 8(%eax), %esi
+ movl (%edx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 16(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 12(%edx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 8(%edx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 4(%edx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, (%esp)
+ leal 392(%esp), %ecx
+ calll .LmulPv256x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 392(%esp), %eax
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 396(%esp), %ecx
+ adcl 400(%esp), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 404(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 408(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ movl 64(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 352(%esp), %edi
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 356(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 360(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 364(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 368(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 376(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 380(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 384(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 312(%esp), %edi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 316(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 272(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 276(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 232(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 236(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 252(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 192(%esp), %edi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 196(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 204(%esp), %edi
+ adcl 208(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 152(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 156(%esp), %ecx
+ adcl 160(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 168(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 172(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 180(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 112(%esp), %esi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 116(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 128(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %eax, %esi
+ adcl 136(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %ecx, %edx
+ subl 24(%esp), %edx # 4-byte Folded Reload
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ movl 104(%esp), %ebp # 4-byte Reload
+ sbbl 28(%esp), %ebp # 4-byte Folded Reload
+ sbbl 32(%esp), %ebx # 4-byte Folded Reload
+ sbbl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ sbbl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ sbbl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ sbbl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 96(%esp) # 4-byte Spill
+ sbbl $0, %edi
+ andl $1, %edi
+ jne .LBB116_2
+# BB#1:
+ movl %edx, %ecx
+.LBB116_2:
+ movl 448(%esp), %edx
+ movl %ecx, (%edx)
+ movl %edi, %ecx
+ testb %cl, %cl
+ jne .LBB116_4
+# BB#3:
+ movl %eax, 108(%esp) # 4-byte Spill
+.LBB116_4:
+ movl 108(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB116_6
+# BB#5:
+ movl %ebp, %eax
+.LBB116_6:
+ movl %eax, 8(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ jne .LBB116_8
+# BB#7:
+ movl %ebx, %ebp
+.LBB116_8:
+ movl %ebp, 12(%edx)
+ movl 100(%esp), %ebx # 4-byte Reload
+ jne .LBB116_10
+# BB#9:
+ movl 68(%esp), %ebx # 4-byte Reload
+.LBB116_10:
+ movl %ebx, 16(%edx)
+ movl 80(%esp), %edi # 4-byte Reload
+ jne .LBB116_12
+# BB#11:
+ movl 72(%esp), %edi # 4-byte Reload
+.LBB116_12:
+ movl %edi, 20(%edx)
+ movl 88(%esp), %esi # 4-byte Reload
+ jne .LBB116_14
+# BB#13:
+ movl 92(%esp), %esi # 4-byte Reload
+.LBB116_14:
+ movl %esi, 24(%edx)
+ jne .LBB116_16
+# BB#15:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB116_16:
+ movl %eax, 28(%edx)
+ addl $428, %esp # imm = 0x1AC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end116:
+ .size mcl_fp_montRed8L, .Lfunc_end116-mcl_fp_montRed8L
+
+ .globl mcl_fp_addPre8L
+ .align 16, 0x90
+ .type mcl_fp_addPre8L,@function
+mcl_fp_addPre8L: # @mcl_fp_addPre8L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 20(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %edi
+ adcl 8(%esi), %edi
+ movl 16(%esp), %ebx
+ movl %ecx, (%ebx)
+ movl 12(%esi), %ecx
+ movl %edx, 4(%ebx)
+ movl 16(%esi), %edx
+ adcl 12(%eax), %ecx
+ adcl 16(%eax), %edx
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %ecx, 12(%ebx)
+ movl 20(%esi), %ecx
+ adcl %edi, %ecx
+ movl 24(%eax), %edi
+ movl %edx, 16(%ebx)
+ movl 24(%esi), %edx
+ adcl %edi, %edx
+ movl %ecx, 20(%ebx)
+ movl %edx, 24(%ebx)
+ movl 28(%eax), %eax
+ movl 28(%esi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 28(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end117:
+ .size mcl_fp_addPre8L, .Lfunc_end117-mcl_fp_addPre8L
+
+ .globl mcl_fp_subPre8L
+ .align 16, 0x90
+ .type mcl_fp_subPre8L,@function
+mcl_fp_subPre8L: # @mcl_fp_subPre8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ xorl %eax, %eax
+ movl 28(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edi), %ebx
+ movl 20(%esp), %ebp
+ movl %edx, (%ebp)
+ movl 12(%ecx), %edx
+ sbbl 12(%edi), %edx
+ movl %esi, 4(%ebp)
+ movl 16(%ecx), %esi
+ sbbl 16(%edi), %esi
+ movl %ebx, 8(%ebp)
+ movl 20(%edi), %ebx
+ movl %edx, 12(%ebp)
+ movl 20(%ecx), %edx
+ sbbl %ebx, %edx
+ movl 24(%edi), %ebx
+ movl %esi, 16(%ebp)
+ movl 24(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edx, 20(%ebp)
+ movl %esi, 24(%ebp)
+ movl 28(%edi), %edx
+ movl 28(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 28(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end118:
+ .size mcl_fp_subPre8L, .Lfunc_end118-mcl_fp_subPre8L
+
+ .globl mcl_fp_shr1_8L
+ .align 16, 0x90
+ .type mcl_fp_shr1_8L,@function
+mcl_fp_shr1_8L: # @mcl_fp_shr1_8L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %eax
+ shrdl $1, %eax, %ecx
+ movl %ecx, 24(%esi)
+ shrl %eax
+ movl %eax, 28(%esi)
+ popl %esi
+ retl
+.Lfunc_end119:
+ .size mcl_fp_shr1_8L, .Lfunc_end119-mcl_fp_shr1_8L
+
+ .globl mcl_fp_add8L
+ .align 16, 0x90
+ .type mcl_fp_add8L,@function
+mcl_fp_add8L: # @mcl_fp_add8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 44(%esp), %edx
+ addl (%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl 4(%edx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%edx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%edx), %esi
+ movl 16(%edx), %eax
+ adcl 12(%edi), %esi
+ adcl 16(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ adcl 20(%edi), %ecx
+ movl 24(%edx), %ebx
+ adcl 24(%edi), %ebx
+ movl 28(%edx), %edi
+ movl 48(%esp), %edx
+ adcl 28(%edx), %edi
+ movl 40(%esp), %edx
+ movl %ebp, (%edx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%edx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%edx)
+ movl %esi, 12(%edx)
+ movl %eax, 16(%edx)
+ movl %ecx, 20(%edx)
+ movl %ebx, 24(%edx)
+ movl %edi, 28(%edx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 52(%esp), %edx
+ movl 8(%esp), %ebp # 4-byte Reload
+ subl (%edx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl 52(%esp), %edx
+ sbbl 4(%edx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 52(%esp), %edx
+ sbbl 8(%edx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp
+ sbbl 12(%ebp), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 4(%esp), %edx # 4-byte Reload
+ sbbl 16(%ebp), %edx
+ movl %edx, %esi
+ sbbl 20(%ebp), %ecx
+ sbbl 24(%ebp), %ebx
+ sbbl 28(%ebp), %edi
+ sbbl $0, %eax
+ testb $1, %al
+ jne .LBB120_2
+# BB#1: # %nocarry
+ movl 8(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %ebp
+ movl %edx, (%ebp)
+ movl 16(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%ebp)
+ movl 12(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%ebp)
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebp)
+ movl %esi, 16(%ebp)
+ movl %ecx, 20(%ebp)
+ movl %ebx, 24(%ebp)
+ movl %edi, 28(%ebp)
+.LBB120_2: # %carry
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end120:
+ .size mcl_fp_add8L, .Lfunc_end120-mcl_fp_add8L
+
+ .globl mcl_fp_addNF8L
+ .align 16, 0x90
+ .type mcl_fp_addNF8L,@function
+mcl_fp_addNF8L: # @mcl_fp_addNF8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 84(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edi
+ movl 80(%esp), %ebx
+ addl (%ebx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 4(%ebx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 28(%eax), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 20(%eax), %ebp
+ movl 16(%eax), %esi
+ movl 12(%eax), %edx
+ movl 8(%eax), %ecx
+ adcl 8(%ebx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 12(%ebx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 16(%ebx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 20(%ebx), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 24(%ebx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 28(%ebx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, %eax
+ subl (%ebx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 4(%ebx), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ sbbl 8(%ebx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%ebx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 20(%ebx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ sbbl 24(%ebx), %ebp
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 28(%ebx), %esi
+ testl %esi, %esi
+ js .LBB121_2
+# BB#1:
+ movl (%esp), %eax # 4-byte Reload
+.LBB121_2:
+ movl 76(%esp), %ebx
+ movl %eax, (%ebx)
+ movl 32(%esp), %eax # 4-byte Reload
+ js .LBB121_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB121_4:
+ movl %eax, 4(%ebx)
+ movl 40(%esp), %edx # 4-byte Reload
+ movl 28(%esp), %edi # 4-byte Reload
+ js .LBB121_6
+# BB#5:
+ movl 8(%esp), %edi # 4-byte Reload
+.LBB121_6:
+ movl %edi, 8(%ebx)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB121_8
+# BB#7:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB121_8:
+ movl %eax, 12(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB121_10
+# BB#9:
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB121_10:
+ movl %edx, 16(%ebx)
+ js .LBB121_12
+# BB#11:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB121_12:
+ movl %ecx, 20(%ebx)
+ js .LBB121_14
+# BB#13:
+ movl %ebp, %eax
+.LBB121_14:
+ movl %eax, 24(%ebx)
+ js .LBB121_16
+# BB#15:
+ movl %esi, %edi
+.LBB121_16:
+ movl %edi, 28(%ebx)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end121:
+ .size mcl_fp_addNF8L, .Lfunc_end121-mcl_fp_addNF8L
+
+ .globl mcl_fp_sub8L
+ .align 16, 0x90
+ .type mcl_fp_sub8L,@function
+mcl_fp_sub8L: # @mcl_fp_sub8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 52(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ xorl %ebx, %ebx
+ movl 56(%esp), %ebp
+ subl (%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 4(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 8(%esi), %edx
+ sbbl 8(%ebp), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 16(%esi), %ecx
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%esi), %edi
+ sbbl 24(%ebp), %edi
+ movl 28(%esi), %esi
+ sbbl 28(%ebp), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 48(%esp), %ebx
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ebx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%ebx)
+ movl %ecx, 16(%ebx)
+ movl %eax, 20(%ebx)
+ movl %edi, 24(%ebx)
+ movl %esi, 28(%ebx)
+ je .LBB122_2
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 60(%esp), %esi
+ movl 16(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ adcl 8(%esi), %ebp
+ movl 12(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl %eax, 20(%ebx)
+ movl 24(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 24(%ebx)
+ movl 28(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+.LBB122_2: # %nocarry
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end122:
+ .size mcl_fp_sub8L, .Lfunc_end122-mcl_fp_sub8L
+
+ .globl mcl_fp_subNF8L
+ .align 16, 0x90
+ .type mcl_fp_subNF8L,@function
+mcl_fp_subNF8L: # @mcl_fp_subNF8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edx
+ movl 68(%esp), %ecx
+ subl (%ecx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ sbbl 4(%ecx), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 28(%eax), %edx
+ movl 24(%eax), %esi
+ movl 20(%eax), %edi
+ movl 16(%eax), %ebx
+ movl 12(%eax), %ebp
+ movl 8(%eax), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ sbbl 24(%ecx), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %edx, %edi
+ sbbl 28(%ecx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ sarl $31, %edi
+ movl 72(%esp), %ebp
+ movl 28(%ebp), %eax
+ andl %edi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%ebp), %eax
+ andl %edi, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 20(%ebp), %ebx
+ andl %edi, %ebx
+ movl 16(%ebp), %esi
+ andl %edi, %esi
+ movl 12(%ebp), %edx
+ andl %edi, %edx
+ movl 8(%ebp), %ecx
+ andl %edi, %ecx
+ movl 4(%ebp), %eax
+ andl %edi, %eax
+ andl (%ebp), %edi
+ addl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl 60(%esp), %ebp
+ movl %edi, (%ebp)
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 4(%ebp)
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 8(%ebp)
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 12(%ebp)
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 16(%ebp)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%ebp)
+ movl %eax, 24(%ebp)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ebp)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end123:
+ .size mcl_fp_subNF8L, .Lfunc_end123-mcl_fp_subNF8L
+
+ .globl mcl_fpDbl_add8L
+ .align 16, 0x90
+ .type mcl_fpDbl_add8L,@function
+mcl_fpDbl_add8L: # @mcl_fpDbl_add8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 84(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 80(%esp), %ebp
+ addl (%ebp), %esi
+ adcl 4(%ebp), %edx
+ movl 8(%ecx), %edi
+ adcl 8(%ebp), %edi
+ movl 12(%ebp), %ebx
+ movl 76(%esp), %eax
+ movl %esi, (%eax)
+ movl 16(%ebp), %esi
+ adcl 12(%ecx), %ebx
+ adcl 16(%ecx), %esi
+ movl %edx, 4(%eax)
+ movl 40(%ecx), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %edi, 8(%eax)
+ movl 20(%ecx), %edx
+ movl %ebx, 12(%eax)
+ movl 20(%ebp), %edi
+ adcl %edx, %edi
+ movl 24(%ecx), %edx
+ movl %esi, 16(%eax)
+ movl 24(%ebp), %esi
+ adcl %edx, %esi
+ movl 28(%ecx), %edx
+ movl %edi, 20(%eax)
+ movl 28(%ebp), %ebx
+ adcl %edx, %ebx
+ movl 32(%ecx), %edx
+ movl %esi, 24(%eax)
+ movl 32(%ebp), %esi
+ adcl %edx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 36(%ecx), %edx
+ movl %ebx, 28(%eax)
+ movl 36(%ebp), %ebx
+ adcl %edx, %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 40(%ebp), %eax
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl 44(%ebp), %edi
+ adcl %edx, %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl 48(%ebp), %eax
+ adcl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 52(%ecx), %edx
+ movl 52(%ebp), %esi
+ adcl %edx, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 56(%ecx), %edx
+ movl 56(%ebp), %eax
+ adcl %edx, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%ecx), %ecx
+ movl 60(%ebp), %ebp
+ adcl %ecx, %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ movl 88(%esp), %edx
+ subl (%edx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ sbbl 4(%eax), %ebx
+ movl %eax, %edx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ movl %edx, %ebx
+ sbbl 8(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl 24(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ sbbl 16(%ebx), %eax
+ sbbl 20(%ebx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ sbbl 24(%ebx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ sbbl 28(%ebx), %ebp
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB124_2
+# BB#1:
+ movl %eax, %edi
+.LBB124_2:
+ testb %cl, %cl
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB124_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB124_4:
+ movl 76(%esp), %eax
+ movl %ecx, 32(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl 32(%esp), %edx # 4-byte Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ movl 28(%esp), %ebx # 4-byte Reload
+ jne .LBB124_6
+# BB#5:
+ movl 4(%esp), %ebx # 4-byte Reload
+.LBB124_6:
+ movl %ebx, 36(%eax)
+ jne .LBB124_8
+# BB#7:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB124_8:
+ movl %esi, 40(%eax)
+ movl 36(%esp), %esi # 4-byte Reload
+ jne .LBB124_10
+# BB#9:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB124_10:
+ movl %edx, 44(%eax)
+ movl %edi, 48(%eax)
+ movl 52(%esp), %edx # 4-byte Reload
+ jne .LBB124_12
+# BB#11:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB124_12:
+ movl %esi, 52(%eax)
+ jne .LBB124_14
+# BB#13:
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB124_14:
+ movl %edx, 56(%eax)
+ jne .LBB124_16
+# BB#15:
+ movl %ebp, %ecx
+.LBB124_16:
+ movl %ecx, 60(%eax)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end124:
+ .size mcl_fpDbl_add8L, .Lfunc_end124-mcl_fpDbl_add8L
+
+ .globl mcl_fpDbl_sub8L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub8L,@function
+mcl_fpDbl_sub8L: # @mcl_fpDbl_sub8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %edx
+ movl 68(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %edx
+ movl 8(%edi), %esi
+ sbbl 8(%ebx), %esi
+ movl 60(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edi), %eax
+ sbbl 12(%ebx), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%edi), %edx
+ sbbl 16(%ebx), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebx), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%edi), %eax
+ sbbl %esi, %eax
+ movl 24(%ebx), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%edi), %edx
+ sbbl %esi, %edx
+ movl 28(%ebx), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%edi), %eax
+ sbbl %esi, %eax
+ movl 32(%ebx), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%edi), %edx
+ sbbl %esi, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 36(%ebx), %edx
+ movl %eax, 28(%ecx)
+ movl 36(%edi), %eax
+ sbbl %edx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 40(%ebx), %eax
+ movl 40(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 44(%ebx), %eax
+ movl 44(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%ebx), %eax
+ movl 48(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 52(%ebx), %eax
+ movl 52(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 56(%ebx), %eax
+ movl 56(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 60(%ebx), %eax
+ movl 60(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 72(%esp), %ebx
+ jne .LBB125_1
+# BB#2:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB125_3
+.LBB125_1:
+ movl 28(%ebx), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+.LBB125_3:
+ testb %al, %al
+ jne .LBB125_4
+# BB#5:
+ movl $0, %ebp
+ movl $0, %eax
+ jmp .LBB125_6
+.LBB125_4:
+ movl (%ebx), %eax
+ movl 4(%ebx), %ebp
+.LBB125_6:
+ jne .LBB125_7
+# BB#8:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB125_9
+.LBB125_7:
+ movl 24(%ebx), %edx
+ movl %edx, (%esp) # 4-byte Spill
+.LBB125_9:
+ jne .LBB125_10
+# BB#11:
+ movl $0, %edx
+ jmp .LBB125_12
+.LBB125_10:
+ movl 20(%ebx), %edx
+.LBB125_12:
+ jne .LBB125_13
+# BB#14:
+ movl $0, %esi
+ jmp .LBB125_15
+.LBB125_13:
+ movl 16(%ebx), %esi
+.LBB125_15:
+ jne .LBB125_16
+# BB#17:
+ movl $0, %edi
+ jmp .LBB125_18
+.LBB125_16:
+ movl 12(%ebx), %edi
+.LBB125_18:
+ jne .LBB125_19
+# BB#20:
+ xorl %ebx, %ebx
+ jmp .LBB125_21
+.LBB125_19:
+ movl 8(%ebx), %ebx
+.LBB125_21:
+ addl 16(%esp), %eax # 4-byte Folded Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 36(%ecx)
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 40(%ecx)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 44(%ecx)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 52(%ecx)
+ movl %eax, 56(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%ecx)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end125:
+ .size mcl_fpDbl_sub8L, .Lfunc_end125-mcl_fpDbl_sub8L
+
+ .align 16, 0x90
+ .type .LmulPv288x32,@function
+.LmulPv288x32: # @mulPv288x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl %edx, %esi
+ movl 76(%esp), %edi
+ movl %edi, %eax
+ mull 32(%esi)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 28(%esi)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 24(%esi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 20(%esi)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 16(%esi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 12(%esi)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 8(%esi)
+ movl %edx, %ebx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 4(%esi)
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull (%esi)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%ecx)
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 36(%ecx)
+ movl %ecx, %eax
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end126:
+ .size .LmulPv288x32, .Lfunc_end126-.LmulPv288x32
+
+ .globl mcl_fp_mulUnitPre9L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre9L,@function
+mcl_fp_mulUnitPre9L: # @mcl_fp_mulUnitPre9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $76, %esp
+ calll .L127$pb
+.L127$pb:
+ popl %ebx
+.Ltmp8:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp8-.L127$pb), %ebx
+ movl 104(%esp), %eax
+ movl %eax, (%esp)
+ leal 32(%esp), %ecx
+ movl 100(%esp), %edx
+ calll .LmulPv288x32
+ movl 68(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edi
+ movl 48(%esp), %ebx
+ movl 44(%esp), %ebp
+ movl 40(%esp), %esi
+ movl 32(%esp), %edx
+ movl 36(%esp), %ecx
+ movl 96(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %ebp, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %edi, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ addl $76, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end127:
+ .size mcl_fp_mulUnitPre9L, .Lfunc_end127-mcl_fp_mulUnitPre9L
+
+ .globl mcl_fpDbl_mulPre9L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre9L,@function
+mcl_fpDbl_mulPre9L: # @mcl_fpDbl_mulPre9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $428, %esp # imm = 0x1AC
+ calll .L128$pb
+.L128$pb:
+ popl %esi
+.Ltmp9:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp9-.L128$pb), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 452(%esp), %edx
+ movl %edx, %ebp
+ movl %esi, %ebx
+ calll .LmulPv288x32
+ movl 420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl 388(%esp), %edi
+ movl 448(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 456(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl %ebp, %edx
+ movl %esi, %ebx
+ calll .LmulPv288x32
+ addl 344(%esp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 364(%esp), %ebx
+ movl 360(%esp), %edi
+ movl 356(%esp), %esi
+ movl 348(%esp), %ecx
+ movl 352(%esp), %edx
+ movl 448(%esp), %eax
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 304(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 324(%esp), %edi
+ movl 320(%esp), %ebp
+ movl 316(%esp), %esi
+ movl 308(%esp), %ecx
+ movl 312(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 264(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 288(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 284(%esp), %ebx
+ movl 280(%esp), %edi
+ movl 276(%esp), %esi
+ movl 268(%esp), %ecx
+ movl 272(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 224(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 248(%esp), %ebx
+ movl 244(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 240(%esp), %edi
+ movl 236(%esp), %ebp
+ movl 228(%esp), %ecx
+ movl 232(%esp), %edx
+ movl 448(%esp), %eax
+ movl 44(%esp), %esi # 4-byte Reload
+ movl %esi, 16(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 44(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 204(%esp), %edi
+ movl 200(%esp), %ebx
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 448(%esp), %eax
+ movl 44(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 20(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 144(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 164(%esp), %ebx
+ movl 160(%esp), %edi
+ movl 156(%esp), %esi
+ movl 148(%esp), %ecx
+ movl 152(%esp), %edx
+ movl 448(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 12(%esp), %esi # 4-byte Reload
+ addl 104(%esp), %esi
+ movl 140(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 136(%esp), %ebp
+ movl 132(%esp), %edi
+ movl 128(%esp), %ebx
+ movl 124(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 116(%esp), %edx
+ movl 108(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl 448(%esp), %eax
+ movl %esi, 28(%eax)
+ movl 12(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 456(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl %esi, %ebp
+ addl 64(%esp), %ebp
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 88(%esp), %edi
+ movl 84(%esp), %ebx
+ movl 80(%esp), %esi
+ movl 76(%esp), %eax
+ movl 448(%esp), %ecx
+ movl %ebp, 32(%ecx)
+ movl %edx, 36(%ecx)
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl 52(%esp), %edx # 4-byte Reload
+ movl %edx, 40(%ecx)
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 56(%ecx)
+ movl %eax, 60(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 68(%ecx)
+ addl $428, %esp # imm = 0x1AC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end128:
+ .size mcl_fpDbl_mulPre9L, .Lfunc_end128-mcl_fpDbl_mulPre9L
+
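+# mcl_fpDbl_sqrPre9L: apparently the double-width squaring of a 9-limb (288-bit)
+# operand with no modular reduction; it reuses .LmulPv288x32 once per input limb,
+# mirroring mcl_fpDbl_mulPre9L above.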
+ .globl mcl_fpDbl_sqrPre9L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre9L,@function
+mcl_fpDbl_sqrPre9L: # @mcl_fpDbl_sqrPre9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $428, %esp # imm = 0x1AC
+ calll .L129$pb
+.L129$pb:
+ popl %ebx
+.Ltmp10:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp10-.L129$pb), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 452(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl %edx, %esi
+ movl %ebx, %edi
+ calll .LmulPv288x32
+ movl 420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl 388(%esp), %ebp
+ movl 448(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 4(%esi), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl %esi, %edx
+ movl %edi, %ebx
+ calll .LmulPv288x32
+ addl 344(%esp), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 364(%esp), %ebx
+ movl 360(%esp), %edi
+ movl 356(%esp), %esi
+ movl 348(%esp), %ecx
+ movl 352(%esp), %edx
+ movl 448(%esp), %eax
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 304(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 324(%esp), %edi
+ movl 320(%esp), %ebp
+ movl 316(%esp), %esi
+ movl 308(%esp), %ecx
+ movl 312(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 264(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 288(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 284(%esp), %ebx
+ movl 280(%esp), %edi
+ movl 276(%esp), %esi
+ movl 268(%esp), %ecx
+ movl 272(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 224(%esp), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 248(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 244(%esp), %edi
+ movl 240(%esp), %ebp
+ movl 236(%esp), %esi
+ movl 228(%esp), %ecx
+ movl 232(%esp), %edx
+ movl 448(%esp), %eax
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 204(%esp), %ebx
+ movl 200(%esp), %edi
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 144(%esp), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 164(%esp), %edi
+ movl 160(%esp), %ebp
+ movl 156(%esp), %esi
+ movl 148(%esp), %ecx
+ movl 152(%esp), %edx
+ movl 448(%esp), %eax
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 4(%esp), %esi # 4-byte Reload
+ addl 104(%esp), %esi
+ movl 140(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 136(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp
+ movl 128(%esp), %ebx
+ movl 124(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 120(%esp), %edi
+ movl 116(%esp), %edx
+ movl 108(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl 448(%esp), %eax
+ movl %esi, 28(%eax)
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl %esi, %ebp
+ addl 64(%esp), %ebp
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 88(%esp), %edi
+ movl 84(%esp), %ebx
+ movl 80(%esp), %esi
+ movl 76(%esp), %eax
+ movl 448(%esp), %ecx
+ movl %ebp, 32(%ecx)
+ movl %edx, 36(%ecx)
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ movl %edx, 40(%ecx)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 56(%ecx)
+ movl %eax, 60(%ecx)
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 68(%ecx)
+ addl $428, %esp # imm = 0x1AC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end129:
+ .size mcl_fpDbl_sqrPre9L, .Lfunc_end129-mcl_fpDbl_sqrPre9L
+
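+# mcl_fp_mont9L: appears to be 9-limb (288-bit) Montgomery multiplication.  Each round
+# multiplies the multiplicand by one limb of the multiplier via .LmulPv288x32, then folds
+# in q*p where q = low_limb * n' (n' is loaded from -4 off the modulus pointer); a final
+# conditional subtraction of the modulus produces the reduced result.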
+ .globl mcl_fp_mont9L
+ .align 16, 0x90
+ .type mcl_fp_mont9L,@function
+mcl_fp_mont9L: # @mcl_fp_mont9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $796, %esp # imm = 0x31C
+ calll .L130$pb
+.L130$pb:
+ popl %ebx
+.Ltmp11:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp11-.L130$pb), %ebx
+ movl 828(%esp), %eax
+ movl -4(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 752(%esp), %ebp
+ movl 756(%esp), %esi
+ movl %ebp, %eax
+ imull %edi, %eax
+ movl 788(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 780(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 776(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 772(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 768(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 764(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 760(%esp), %edi
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 712(%esp), %ebp
+ adcl 716(%esp), %esi
+ adcl 720(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 748(%esp), %ebp
+ sbbl %eax, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 64(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 672(%esp), %esi
+ adcl 676(%esp), %edi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 704(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 708(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ andl $1, %ebp
+ addl 632(%esp), %esi
+ adcl 636(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 660(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 824(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ addl 592(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 616(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 624(%esp), %esi
+ adcl 628(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ andl $1, %ebp
+ addl 552(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 580(%esp), %edi
+ adcl 584(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 824(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 512(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 524(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 536(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 544(%esp), %edi
+ adcl 548(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 472(%esp), %ebp
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 484(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 496(%esp), %ebp
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 500(%esp), %esi
+ adcl 504(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 508(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 820(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 432(%esp), %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 444(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 452(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 456(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl %esi, %eax
+ andl $1, %eax
+ addl 392(%esp), %ebp
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 396(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 400(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 404(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 408(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 412(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 416(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 424(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 428(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ addl 352(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 364(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 372(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 384(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 312(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 316(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 328(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 332(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 336(%esp), %esi
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 340(%esp), %edi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 344(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 348(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %ebp
+ movl 824(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 272(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 292(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 296(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 308(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 232(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 240(%esp), %esi
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 244(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 260(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 192(%esp), %ecx
+ adcl 196(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 200(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 212(%esp), %esi
+ adcl 216(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ andl $1, %ebp
+ addl 152(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 164(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 172(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 180(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 112(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 120(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ adcl 136(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ andl $1, %edi
+ addl 72(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 92(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl 828(%esp), %ebx
+ subl (%ebx), %eax
+ movl %ecx, %edx
+ sbbl 4(%ebx), %edx
+ movl %esi, %ecx
+ sbbl 8(%ebx), %ecx
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 12(%ebx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 20(%ebx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 24(%ebx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ sbbl 28(%ebx), %esi
+ movl 60(%esp), %ebp # 4-byte Reload
+ sbbl 32(%ebx), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ sbbl $0, %edi
+ andl $1, %edi
+ movl %edi, %ebx
+ jne .LBB130_2
+# BB#1:
+ movl %esi, 32(%esp) # 4-byte Spill
+.LBB130_2:
+ testb %bl, %bl
+ movl 68(%esp), %esi # 4-byte Reload
+ jne .LBB130_4
+# BB#3:
+ movl %eax, %esi
+.LBB130_4:
+ movl 816(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 64(%esp), %eax # 4-byte Reload
+ jne .LBB130_6
+# BB#5:
+ movl %edx, %eax
+.LBB130_6:
+ movl %eax, 4(%ebp)
+ movl 52(%esp), %eax # 4-byte Reload
+ jne .LBB130_8
+# BB#7:
+ movl %ecx, %eax
+.LBB130_8:
+ movl %eax, 8(%ebp)
+ movl 44(%esp), %eax # 4-byte Reload
+ jne .LBB130_10
+# BB#9:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB130_10:
+ movl %eax, 12(%ebp)
+ jne .LBB130_12
+# BB#11:
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+.LBB130_12:
+ movl 40(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebp)
+ movl 36(%esp), %eax # 4-byte Reload
+ jne .LBB130_14
+# BB#13:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB130_14:
+ movl %eax, 20(%ebp)
+ movl 48(%esp), %eax # 4-byte Reload
+ jne .LBB130_16
+# BB#15:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB130_16:
+ movl %eax, 24(%ebp)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebp)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB130_18
+# BB#17:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB130_18:
+ movl %eax, 32(%ebp)
+ addl $796, %esp # imm = 0x31C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end130:
+ .size mcl_fp_mont9L, .Lfunc_end130-mcl_fp_mont9L
+
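+# mcl_fp_montNF9L: seemingly the "NF" variant of the Montgomery multiply above (used by
+# mcl when the modulus leaves headroom in the top limb); the interleaved structure is the
+# same, but per-round carries are kept without the sbbl/andl mask and the final subtraction
+# is selected on the sign of the borrow (sarl $31 / js) rather than a saved carry bit.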
+ .globl mcl_fp_montNF9L
+ .align 16, 0x90
+ .type mcl_fp_montNF9L,@function
+mcl_fp_montNF9L: # @mcl_fp_montNF9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $796, %esp # imm = 0x31C
+ calll .L131$pb
+.L131$pb:
+ popl %ebx
+.Ltmp12:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp12-.L131$pb), %ebx
+ movl 828(%esp), %eax
+ movl -4(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 752(%esp), %esi
+ movl 756(%esp), %ebp
+ movl %esi, %eax
+ imull %edi, %eax
+ movl 788(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 780(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 776(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 772(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 768(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 764(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 760(%esp), %edi
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 712(%esp), %esi
+ adcl 716(%esp), %ebp
+ adcl 720(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 740(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 708(%esp), %eax
+ addl 672(%esp), %ebp
+ adcl 676(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 696(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 704(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 632(%esp), %ebp
+ adcl 636(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 656(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 664(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 628(%esp), %eax
+ addl 592(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 596(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 600(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 604(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 608(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 612(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 616(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 620(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 624(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 552(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 572(%esp), %esi
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 576(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 588(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 548(%esp), %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ addl 512(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 516(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 520(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 524(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 528(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 532(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 536(%esp), %ebp
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 540(%esp), %edi
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 544(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edx, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 472(%esp), %eax
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 496(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 500(%esp), %edi
+ movl %edi, %ebp
+ adcl 504(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 820(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ movl 468(%esp), %eax
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 432(%esp), %ecx
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 436(%esp), %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 440(%esp), %edi
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 444(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 448(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 452(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 456(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 460(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 464(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 392(%esp), %ebp
+ adcl 396(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 400(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 412(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 424(%esp), %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 428(%esp), %esi
+ movl 824(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 388(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 352(%esp), %ecx
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 356(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 360(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 364(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 368(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 372(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 376(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 380(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 312(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 324(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 340(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 348(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 308(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 272(%esp), %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 280(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 292(%esp), %ebp
+ adcl 296(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 232(%esp), %edi
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 236(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ adcl 252(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 228(%esp), %ebp
+ movl %esi, %ecx
+ addl 192(%esp), %ecx
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 196(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 208(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 152(%esp), %edi
+ adcl 156(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 160(%esp), %edi
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 164(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 188(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 148(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 112(%esp), %ecx
+ adcl 116(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 120(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 132(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 72(%esp), %edi
+ movl 44(%esp), %edi # 4-byte Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 80(%esp), %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 84(%esp), %edi
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 92(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 828(%esp), %eax
+ subl (%eax), %edx
+ sbbl 4(%eax), %ebx
+ movl %edi, %ecx
+ sbbl 8(%eax), %ecx
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 12(%eax), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ sbbl 16(%eax), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 20(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 24(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 28(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ sbbl 32(%eax), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ js .LBB131_2
+# BB#1:
+ movl %edx, %eax
+.LBB131_2:
+ movl 816(%esp), %edx
+ movl %eax, (%edx)
+ movl 64(%esp), %esi # 4-byte Reload
+ js .LBB131_4
+# BB#3:
+ movl %ebx, %esi
+.LBB131_4:
+ movl %esi, 4(%edx)
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB131_6
+# BB#5:
+ movl %ecx, %edi
+.LBB131_6:
+ movl %edi, 8(%edx)
+ js .LBB131_8
+# BB#7:
+ movl 16(%esp), %ebp # 4-byte Reload
+.LBB131_8:
+ movl %ebp, 12(%edx)
+ js .LBB131_10
+# BB#9:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB131_10:
+ movl %eax, 16(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB131_12
+# BB#11:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB131_12:
+ movl %eax, 20(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB131_14
+# BB#13:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB131_14:
+ movl %eax, 24(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB131_16
+# BB#15:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB131_16:
+ movl %eax, 28(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB131_18
+# BB#17:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB131_18:
+ movl %eax, 32(%edx)
+ addl $796, %esp # imm = 0x31C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end131:
+ .size mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L
+
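+# mcl_fp_montRed9L: appears to be a standalone Montgomery reduction of an 18-limb input:
+# each of nine rounds computes q = limb * n' and adds q*p via .LmulPv288x32, dropping one
+# low limb per round, with a final conditional subtraction of the modulus at the end.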
+ .globl mcl_fp_montRed9L
+ .align 16, 0x90
+ .type mcl_fp_montRed9L,@function
+mcl_fp_montRed9L: # @mcl_fp_montRed9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $492, %esp # imm = 0x1EC
+ calll .L132$pb
+.L132$pb:
+ popl %ebx
+.Ltmp13:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp13-.L132$pb), %ebx
+ movl 520(%esp), %edx
+ movl -4(%edx), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 516(%esp), %eax
+ movl (%eax), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 4(%eax), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ imull %edi, %ecx
+ movl 68(%eax), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 64(%eax), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 60(%eax), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 56(%eax), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 52(%eax), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 48(%eax), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 44(%eax), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 40(%eax), %edi
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 36(%eax), %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ movl 32(%eax), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 24(%eax), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 20(%eax), %ebp
+ movl 16(%eax), %edi
+ movl 12(%eax), %esi
+ movl 8(%eax), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl (%edx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 16(%edx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%edx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 8(%edx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 4(%edx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, (%esp)
+ leal 448(%esp), %ecx
+ calll .LmulPv288x32
+ movl 76(%esp), %eax # 4-byte Reload
+ addl 448(%esp), %eax
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 452(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 460(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 464(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 468(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ sbbl %eax, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ movl 76(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 408(%esp), %ecx
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 412(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 416(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 424(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 428(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 432(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 436(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 440(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl $0, %esi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 368(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 404(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 328(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 364(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 288(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ movl 64(%esp), %eax # 4-byte Reload
+ addl 288(%esp), %eax
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 292(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 520(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 248(%esp), %esi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 264(%esp), %ebp
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %edi, %esi
+ adcl $0, %esi
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 208(%esp), %edi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 212(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 220(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 520(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 168(%esp), %ebp
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 172(%esp), %ecx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 180(%esp), %ebp
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 184(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 128(%esp), %edi
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl 136(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ adcl 140(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 144(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 164(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ subl 20(%esp), %edi # 4-byte Folded Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 16(%esp), %eax # 4-byte Folded Reload
+ sbbl 24(%esp), %esi # 4-byte Folded Reload
+ sbbl 28(%esp), %ecx # 4-byte Folded Reload
+ sbbl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %ebx # 4-byte Reload
+ sbbl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx # 4-byte Reload
+ sbbl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 92(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ movl %ebp, %edx
+ sbbl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 104(%esp) # 4-byte Spill
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB132_2
+# BB#1:
+ movl %ecx, 116(%esp) # 4-byte Spill
+.LBB132_2:
+ testb %dl, %dl
+ movl 120(%esp), %ecx # 4-byte Reload
+ jne .LBB132_4
+# BB#3:
+ movl %edi, %ecx
+.LBB132_4:
+ movl 512(%esp), %edi
+ movl %ecx, (%edi)
+ movl 88(%esp), %ecx # 4-byte Reload
+ jne .LBB132_6
+# BB#5:
+ movl %eax, 124(%esp) # 4-byte Spill
+.LBB132_6:
+ movl 124(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%edi)
+ movl 96(%esp), %eax # 4-byte Reload
+ jne .LBB132_8
+# BB#7:
+ movl %esi, %eax
+.LBB132_8:
+ movl %eax, 8(%edi)
+ movl 116(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl 108(%esp), %ebp # 4-byte Reload
+ jne .LBB132_10
+# BB#9:
+ movl 72(%esp), %ebp # 4-byte Reload
+.LBB132_10:
+ movl %ebp, 16(%edi)
+ movl 112(%esp), %ebx # 4-byte Reload
+ jne .LBB132_12
+# BB#11:
+ movl 76(%esp), %ebx # 4-byte Reload
+.LBB132_12:
+ movl %ebx, 20(%edi)
+ movl 100(%esp), %esi # 4-byte Reload
+ jne .LBB132_14
+# BB#13:
+ movl 84(%esp), %esi # 4-byte Reload
+.LBB132_14:
+ movl %esi, 24(%edi)
+ jne .LBB132_16
+# BB#15:
+ movl 92(%esp), %ecx # 4-byte Reload
+.LBB132_16:
+ movl %ecx, 28(%edi)
+ jne .LBB132_18
+# BB#17:
+ movl 104(%esp), %eax # 4-byte Reload
+.LBB132_18:
+ movl %eax, 32(%edi)
+ addl $492, %esp # imm = 0x1EC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end132:
+ .size mcl_fp_montRed9L, .Lfunc_end132-mcl_fp_montRed9L
+
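+# mcl_fp_addPre9L: 9-limb (288-bit) addition without modular reduction; the
+# carry out of the top limb is returned in %eax.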
+ .globl mcl_fp_addPre9L
+ .align 16, 0x90
+ .type mcl_fp_addPre9L,@function
+mcl_fp_addPre9L: # @mcl_fp_addPre9L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl %esi, 24(%ebx)
+ movl %edx, 28(%ebx)
+ movl 32(%eax), %eax
+ movl 32(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 32(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end133:
+ .size mcl_fp_addPre9L, .Lfunc_end133-mcl_fp_addPre9L
+
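+# mcl_fp_subPre9L: 9-limb subtraction without modular reduction; the borrow out
+# of the top limb is returned in %eax.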
+ .globl mcl_fp_subPre9L
+ .align 16, 0x90
+ .type mcl_fp_subPre9L,@function
+mcl_fp_subPre9L: # @mcl_fp_subPre9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edi, 24(%ebp)
+ movl %esi, 28(%ebp)
+ movl 32(%edx), %edx
+ movl 32(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 32(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end134:
+ .size mcl_fp_subPre9L, .Lfunc_end134-mcl_fp_subPre9L
+
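+# mcl_fp_shr1_9L: right shift of a 9-limb value by one bit (halving), carrying
+# bits across limb boundaries with shrdl.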
+ .globl mcl_fp_shr1_9L
+ .align 16, 0x90
+ .type mcl_fp_shr1_9L,@function
+mcl_fp_shr1_9L: # @mcl_fp_shr1_9L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 24(%esi)
+ movl 32(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 28(%esi)
+ shrl %eax
+ movl %eax, 32(%esi)
+ popl %esi
+ retl
+.Lfunc_end135:
+ .size mcl_fp_shr1_9L, .Lfunc_end135-mcl_fp_shr1_9L
+
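+# mcl_fp_add9L: modular addition. The raw 9-limb sum is stored first, then the
+# modulus is subtracted; if that subtraction does not borrow, the reduced value
+# overwrites the stored result.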
+ .globl mcl_fp_add9L
+ .align 16, 0x90
+ .type mcl_fp_add9L,@function
+mcl_fp_add9L: # @mcl_fp_add9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 44(%esp), %ebx
+ addl (%ebx), %ecx
+ movl %ecx, %ebp
+ adcl 4(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %eax
+ adcl 12(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 16(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 20(%ebx), %esi
+ adcl 20(%edi), %esi
+ movl 24(%ebx), %edx
+ adcl 24(%edi), %edx
+ movl 28(%ebx), %ecx
+ adcl 28(%edi), %ecx
+ movl 32(%ebx), %eax
+ adcl 32(%edi), %eax
+ movl 40(%esp), %edi
+ movl %ebp, (%edi)
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%edi)
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%edi)
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%edi)
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%edi)
+ movl %esi, 20(%edi)
+ movl %edx, 24(%edi)
+ movl %ecx, 28(%edi)
+ movl %eax, 32(%edi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 52(%esp), %edi
+ subl (%edi), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl 16(%esp), %ebp # 4-byte Reload
+ sbbl 4(%edi), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ sbbl 8(%edi), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebp # 4-byte Reload
+ sbbl 12(%edi), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 4(%esp), %ebp # 4-byte Reload
+ sbbl 16(%edi), %ebp
+ sbbl 20(%edi), %esi
+ sbbl 24(%edi), %edx
+ sbbl 28(%edi), %ecx
+ sbbl 32(%edi), %eax
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB136_2
+# BB#1: # %nocarry
+ movl (%esp), %edi # 4-byte Reload
+ movl 40(%esp), %ebx
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl %ebp, 16(%ebx)
+ movl %esi, 20(%ebx)
+ movl %edx, 24(%ebx)
+ movl %ecx, 28(%ebx)
+ movl %eax, 32(%ebx)
+.LBB136_2: # %carry
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end136:
+ .size mcl_fp_add9L, .Lfunc_end136-mcl_fp_add9L
+
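+# mcl_fp_addNF9L: modular addition variant that computes both the sum and
+# sum - modulus, then selects limb by limb on the sign of the subtraction.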
+ .globl mcl_fp_addNF9L
+ .align 16, 0x90
+ .type mcl_fp_addNF9L,@function
+mcl_fp_addNF9L: # @mcl_fp_addNF9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 100(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edi
+ movl 96(%esp), %esi
+ addl (%esi), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 4(%esi), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 28(%eax), %ebp
+ movl 24(%eax), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 20(%eax), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 16(%eax), %ebx
+ movl 12(%eax), %edx
+ movl 8(%eax), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 12(%esi), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 16(%esi), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 20(%esi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 24(%esi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 28(%esi), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 32(%esi), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 104(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, %ebp
+ subl (%esi), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ sbbl 4(%esi), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ sbbl 8(%esi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ sbbl 16(%esi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ sbbl 20(%esi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ sbbl 24(%esi), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 28(%esi), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edx
+ movl %ecx, %ebp
+ sbbl 32(%esi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sarl $31, %esi
+ testl %esi, %esi
+ js .LBB137_2
+# BB#1:
+ movl (%esp), %eax # 4-byte Reload
+.LBB137_2:
+ movl 92(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB137_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB137_4:
+ movl %eax, 4(%ecx)
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB137_6
+# BB#5:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB137_6:
+ movl %eax, 8(%ecx)
+ movl %ebp, %eax
+ js .LBB137_8
+# BB#7:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB137_8:
+ movl %edx, 12(%ecx)
+ movl 56(%esp), %edx # 4-byte Reload
+ js .LBB137_10
+# BB#9:
+ movl 16(%esp), %ebx # 4-byte Reload
+.LBB137_10:
+ movl %ebx, 16(%ecx)
+ js .LBB137_12
+# BB#11:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB137_12:
+ movl %edi, 20(%ecx)
+ js .LBB137_14
+# BB#13:
+ movl 24(%esp), %esi # 4-byte Reload
+.LBB137_14:
+ movl %esi, 24(%ecx)
+ js .LBB137_16
+# BB#15:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB137_16:
+ movl %edx, 28(%ecx)
+ js .LBB137_18
+# BB#17:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB137_18:
+ movl %eax, 32(%ecx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end137:
+ .size mcl_fp_addNF9L, .Lfunc_end137-mcl_fp_addNF9L
+
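+# mcl_fp_sub9L: modular subtraction. The raw difference is stored; if the
+# subtraction borrowed, the modulus is added back in the carry path.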
+ .globl mcl_fp_sub9L
+ .align 16, 0x90
+ .type mcl_fp_sub9L,@function
+mcl_fp_sub9L: # @mcl_fp_sub9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 52(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 56(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 16(%esi), %edx
+ sbbl 16(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 20(%esi), %ecx
+ sbbl 20(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 28(%esi), %ebp
+ sbbl 28(%edi), %ebp
+ movl 32(%esi), %esi
+ sbbl 32(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 48(%esp), %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl %edx, 16(%ebx)
+ movl %ecx, 20(%ebx)
+ movl %eax, 24(%ebx)
+ movl %ebp, 28(%ebx)
+ movl %esi, 32(%ebx)
+ je .LBB138_2
+# BB#1: # %carry
+ movl %esi, %edi
+ movl 60(%esp), %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl %ecx, 24(%ebx)
+ movl 28(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 28(%ebx)
+ movl 32(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 32(%ebx)
+.LBB138_2: # %nocarry
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end138:
+ .size mcl_fp_sub9L, .Lfunc_end138-mcl_fp_sub9L
+
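+# mcl_fp_subNF9L: modular subtraction variant; the modulus, masked by the sign
+# of the raw difference, is added back without a branch.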
+ .globl mcl_fp_subNF9L
+ .align 16, 0x90
+ .type mcl_fp_subNF9L,@function
+mcl_fp_subNF9L: # @mcl_fp_subNF9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $48, %esp
+ movl 72(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ movl 76(%esp), %esi
+ subl (%esi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ sbbl 4(%esi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 28(%ecx), %edx
+ movl 24(%ecx), %edi
+ movl 20(%ecx), %ebx
+ movl 16(%ecx), %ebp
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ movl 76(%esp), %esi
+ sbbl 8(%esi), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx
+ sbbl 12(%ecx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ sbbl 24(%ecx), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ sbbl 28(%ecx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ sbbl 32(%ecx), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ sarl $31, %ecx
+ movl %ecx, %eax
+ shldl $1, %edx, %eax
+ movl 80(%esp), %ebp
+ movl 12(%ebp), %edx
+ andl %eax, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 4(%ebp), %edi
+ andl %eax, %edi
+ andl (%ebp), %eax
+ movl 32(%ebp), %edx
+ andl %ecx, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 28(%ebp), %edx
+ andl %ecx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ roll %ecx
+ movl 24(%ebp), %ebx
+ andl %ecx, %ebx
+ movl 20(%ebp), %esi
+ andl %ecx, %esi
+ movl 16(%ebp), %edx
+ andl %ecx, %edx
+ andl 8(%ebp), %ecx
+ addl 32(%esp), %eax # 4-byte Folded Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %ebp
+ movl %eax, (%ebp)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 4(%ebp)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebp)
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 12(%ebp)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 16(%ebp)
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 20(%ebp)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 24(%ebp)
+ movl %eax, 28(%ebp)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ebp)
+ addl $48, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end139:
+ .size mcl_fp_subNF9L, .Lfunc_end139-mcl_fp_subNF9L
+
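+# mcl_fpDbl_add9L: addition of two 18-limb (double-width) values; the low nine
+# limbs are stored directly and the high half is conditionally reduced by the
+# modulus.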
+ .globl mcl_fpDbl_add9L
+ .align 16, 0x90
+ .type mcl_fpDbl_add9L,@function
+mcl_fpDbl_add9L: # @mcl_fpDbl_add9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $68, %esp
+ movl 96(%esp), %edx
+ movl 92(%esp), %edi
+ movl 12(%edi), %esi
+ movl 16(%edi), %ecx
+ movl 8(%edx), %ebx
+ movl (%edx), %ebp
+ addl (%edi), %ebp
+ movl 88(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%edx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%edx), %esi
+ adcl 16(%edx), %ecx
+ movl %ebp, 4(%eax)
+ movl 44(%edx), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl %ebx, 8(%eax)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %ecx, 16(%eax)
+ movl 24(%edi), %ecx
+ adcl %ebx, %ecx
+ movl 28(%edx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %ecx, 24(%eax)
+ movl 32(%edi), %ecx
+ adcl %ebx, %ecx
+ movl 36(%edx), %ebp
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebp, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 40(%edx), %esi
+ movl %ecx, 32(%eax)
+ movl 40(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%edi), %eax
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl 48(%edi), %ebx
+ adcl %ecx, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ movl 52(%edi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 56(%edx), %esi
+ movl 56(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%edx), %ebp
+ movl 60(%edi), %esi
+ adcl %ebp, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 64(%edx), %eax
+ movl 64(%edi), %ebp
+ adcl %eax, %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 68(%edx), %edx
+ movl 68(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 100(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ subl (%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 4(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ sbbl 8(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 12(%edi), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 20(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ sbbl 24(%edi), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl 32(%esp), %ebp # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ sbbl 32(%edi), %ebx
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB140_2
+# BB#1:
+ movl %ebx, %ebp
+.LBB140_2:
+ testb %dl, %dl
+ movl 60(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl 36(%esp), %esi # 4-byte Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ jne .LBB140_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB140_4:
+ movl 88(%esp), %eax
+ movl %edx, 36(%eax)
+ movl %ebx, 40(%eax)
+ movl %edi, 44(%eax)
+ movl %esi, 48(%eax)
+ movl %ecx, 52(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB140_6
+# BB#5:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB140_6:
+ movl %ecx, 56(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB140_8
+# BB#7:
+ movl 24(%esp), %edx # 4-byte Reload
+.LBB140_8:
+ movl %edx, 60(%eax)
+ jne .LBB140_10
+# BB#9:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB140_10:
+ movl %ecx, 64(%eax)
+ movl %ebp, 68(%eax)
+ addl $68, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end140:
+ .size mcl_fpDbl_add9L, .Lfunc_end140-mcl_fpDbl_add9L
+
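+# mcl_fpDbl_sub9L: subtraction of two 18-limb values; the low nine limbs are
+# stored directly and, on borrow, the modulus is added back into the high half.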
+ .globl mcl_fpDbl_sub9L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub9L,@function
+mcl_fpDbl_sub9L: # @mcl_fpDbl_sub9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl 76(%esp), %ebx
+ movl (%ebx), %eax
+ movl 4(%ebx), %edx
+ movl 80(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %edx
+ movl 8(%ebx), %esi
+ sbbl 8(%ebp), %esi
+ movl 72(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%ebx), %eax
+ sbbl 12(%ebp), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%ebx), %edx
+ sbbl 16(%ebp), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebp), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%ebx), %eax
+ sbbl %esi, %eax
+ movl 24(%ebp), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%ebx), %edx
+ sbbl %esi, %edx
+ movl 28(%ebp), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%ebx), %eax
+ sbbl %esi, %eax
+ movl 32(%ebp), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%ebx), %edx
+ sbbl %esi, %edx
+ movl 36(%ebp), %esi
+ movl %eax, 28(%ecx)
+ movl 36(%ebx), %eax
+ sbbl %esi, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%ebp), %eax
+ movl %edx, 32(%ecx)
+ movl 40(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 44(%ebp), %eax
+ movl 44(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%ebp), %eax
+ movl 48(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 52(%ebp), %eax
+ movl 52(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 56(%ebp), %eax
+ movl 56(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 60(%ebp), %eax
+ movl 60(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 64(%ebp), %eax
+ movl 64(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 68(%ebp), %eax
+ movl 68(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 84(%esp), %ebp
+ jne .LBB141_1
+# BB#2:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB141_3
+.LBB141_1:
+ movl 32(%ebp), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+.LBB141_3:
+ testb %al, %al
+ jne .LBB141_4
+# BB#5:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ movl $0, %esi
+ jmp .LBB141_6
+.LBB141_4:
+ movl (%ebp), %esi
+ movl 4(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB141_6:
+ jne .LBB141_7
+# BB#8:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB141_9
+.LBB141_7:
+ movl 28(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB141_9:
+ jne .LBB141_10
+# BB#11:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB141_12
+.LBB141_10:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB141_12:
+ jne .LBB141_13
+# BB#14:
+ movl $0, %edi
+ jmp .LBB141_15
+.LBB141_13:
+ movl 20(%ebp), %edi
+.LBB141_15:
+ jne .LBB141_16
+# BB#17:
+ movl $0, %ebx
+ jmp .LBB141_18
+.LBB141_16:
+ movl 16(%ebp), %ebx
+.LBB141_18:
+ jne .LBB141_19
+# BB#20:
+ movl %ebp, %eax
+ movl $0, %ebp
+ jmp .LBB141_21
+.LBB141_19:
+ movl %ebp, %eax
+ movl 12(%eax), %ebp
+.LBB141_21:
+ jne .LBB141_22
+# BB#23:
+ xorl %eax, %eax
+ jmp .LBB141_24
+.LBB141_22:
+ movl 8(%eax), %eax
+.LBB141_24:
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 36(%ecx)
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 40(%ecx)
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 48(%ecx)
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ movl (%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 56(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 60(%ecx)
+ movl %eax, 64(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%ecx)
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end141:
+ .size mcl_fpDbl_sub9L, .Lfunc_end141-mcl_fpDbl_sub9L
+
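+# .LmulPv320x32: local helper that multiplies a 10-limb (320-bit) operand
+# (pointer in %edx) by the single 32-bit word passed on the stack, writing the
+# 11-limb result to the buffer passed in %ecx and returning that pointer.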
+ .align 16, 0x90
+ .type .LmulPv320x32,@function
+.LmulPv320x32: # @mulPv320x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+ movl %edx, %esi
+ movl 84(%esp), %edi
+ movl %edi, %eax
+ mull 36(%esi)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 32(%esi)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 28(%esi)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 24(%esi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 20(%esi)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 16(%esi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 12(%esi)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 8(%esi)
+ movl %edx, %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 4(%esi)
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull (%esi)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%ecx)
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 40(%ecx)
+ movl %ecx, %eax
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end142:
+ .size .LmulPv320x32, .Lfunc_end142-.LmulPv320x32
+
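+# mcl_fp_mulUnitPre10L: multiplies a 10-limb value by one 32-bit word via
+# .LmulPv320x32 and copies the 11-limb result to the output.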
+ .globl mcl_fp_mulUnitPre10L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre10L,@function
+mcl_fp_mulUnitPre10L: # @mcl_fp_mulUnitPre10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $76, %esp
+ calll .L143$pb
+.L143$pb:
+ popl %ebx
+.Ltmp14:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp14-.L143$pb), %ebx
+ movl 104(%esp), %eax
+ movl %eax, (%esp)
+ leal 32(%esp), %ecx
+ movl 100(%esp), %edx
+ calll .LmulPv320x32
+ movl 72(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 60(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 56(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx
+ movl 48(%esp), %ebp
+ movl 44(%esp), %edi
+ movl 40(%esp), %esi
+ movl 32(%esp), %edx
+ movl 36(%esp), %ecx
+ movl 96(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebp, 16(%eax)
+ movl %ebx, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ addl $76, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end143:
+ .size mcl_fp_mulUnitPre10L, .Lfunc_end143-mcl_fp_mulUnitPre10L
+
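+# mcl_fpDbl_mulPre10L: full 20-limb product of two 10-limb operands. Each input
+# is split into 5-limb halves and mcl_fpDbl_mulPre5L is called three times
+# (low*low, high*high, and the half-sums), Karatsuba style, before recombining.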
+ .globl mcl_fpDbl_mulPre10L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre10L,@function
+mcl_fpDbl_mulPre10L: # @mcl_fpDbl_mulPre10L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $188, %esp
+ calll .L144$pb
+.L144$pb:
+ popl %ebx
+.Ltmp15:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp15-.L144$pb), %ebx
+ movl %ebx, -128(%ebp) # 4-byte Spill
+ movl 16(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl 12(%ebp), %esi
+ movl %esi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre5L@PLT
+ leal 20(%edi), %eax
+ movl %eax, 8(%esp)
+ leal 20(%esi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 40(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre5L@PLT
+ movl 28(%esi), %edi
+ movl (%esi), %ebx
+ movl 4(%esi), %eax
+ addl 20(%esi), %ebx
+ movl %ebx, -148(%ebp) # 4-byte Spill
+ adcl 24(%esi), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ adcl 8(%esi), %edi
+ movl %edi, -140(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ addl 20(%esi), %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ adcl 24(%esi), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ movl 28(%esi), %eax
+ adcl 8(%esi), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl 32(%esi), %eax
+ adcl 12(%esi), %eax
+ movl 36(%esi), %ecx
+ adcl 16(%esi), %ecx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %esi
+ popl %eax
+ movl %esi, -156(%ebp) # 4-byte Spill
+ movl %ebx, -124(%ebp) # 4-byte Spill
+ jb .LBB144_2
+# BB#1:
+ xorl %edi, %edi
+ movl $0, -124(%ebp) # 4-byte Folded Spill
+.LBB144_2:
+ movl %edi, -136(%ebp) # 4-byte Spill
+ movl 12(%ebp), %esi
+ movl %esi, %ebx
+ movl 36(%ebx), %esi
+ movl 32(%ebx), %edi
+ movl -96(%ebp), %edx # 4-byte Reload
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 12(%ebx), %edi
+ movl %edi, -116(%ebp) # 4-byte Spill
+ adcl 16(%ebx), %esi
+ movl %esi, -144(%ebp) # 4-byte Spill
+ movl %ecx, -112(%ebp) # 4-byte Spill
+ movl %eax, -104(%ebp) # 4-byte Spill
+ movl -160(%ebp), %edx # 4-byte Reload
+ movl %edx, -108(%ebp) # 4-byte Spill
+ movl -120(%ebp), %esi # 4-byte Reload
+ movl %esi, -96(%ebp) # 4-byte Spill
+ movl -152(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -100(%ebp) # 4-byte Spill
+ jb .LBB144_4
+# BB#3:
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+ movl $0, -104(%ebp) # 4-byte Folded Spill
+ movl $0, -108(%ebp) # 4-byte Folded Spill
+ movl $0, -96(%ebp) # 4-byte Folded Spill
+ movl $0, -100(%ebp) # 4-byte Folded Spill
+.LBB144_4:
+ movl -148(%ebp), %esi # 4-byte Reload
+ movl %esi, -72(%ebp)
+ movl -132(%ebp), %edi # 4-byte Reload
+ movl %edi, -68(%ebp)
+ movl -140(%ebp), %esi # 4-byte Reload
+ movl %esi, -64(%ebp)
+ movl %ebx, -92(%ebp)
+ movl -120(%ebp), %esi # 4-byte Reload
+ movl %esi, -88(%ebp)
+ movl %edx, -84(%ebp)
+ movl %eax, -80(%ebp)
+ movl %ecx, -76(%ebp)
+ sbbl %edx, %edx
+ movl -116(%ebp), %eax # 4-byte Reload
+ movl %eax, -60(%ebp)
+ movl -144(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -56(%ebp)
+ movl -156(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB144_6
+# BB#5:
+ movl $0, %ebx
+ movl $0, %eax
+ movl $0, %edi
+.LBB144_6:
+ movl %eax, -116(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+ leal -92(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -72(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -52(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl -124(%ebp), %eax # 4-byte Reload
+ addl %eax, -100(%ebp) # 4-byte Folded Spill
+ adcl %edi, -96(%ebp) # 4-byte Folded Spill
+ movl -108(%ebp), %esi # 4-byte Reload
+ adcl -136(%ebp), %esi # 4-byte Folded Reload
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl %eax, -104(%ebp) # 4-byte Folded Spill
+ movl -112(%ebp), %edi # 4-byte Reload
+ adcl %ebx, %edi
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ andl $1, %edx
+ movl %edx, -116(%ebp) # 4-byte Spill
+ movl -128(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre5L@PLT
+ movl -100(%ebp), %eax # 4-byte Reload
+ addl -32(%ebp), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ adcl -24(%ebp), %esi
+ movl %esi, -108(%ebp) # 4-byte Spill
+ movl -104(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ adcl -16(%ebp), %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl %eax, -116(%ebp) # 4-byte Folded Spill
+ movl -52(%ebp), %ecx
+ movl 8(%ebp), %esi
+ subl (%esi), %ecx
+ movl -48(%ebp), %ebx
+ sbbl 4(%esi), %ebx
+ movl -44(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ movl -40(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl -36(%ebp), %edi
+ sbbl 16(%esi), %edi
+ movl 20(%esi), %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+ sbbl %eax, -100(%ebp) # 4-byte Folded Spill
+ movl 24(%esi), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ sbbl %eax, -96(%ebp) # 4-byte Folded Spill
+ movl 28(%esi), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ sbbl %eax, -108(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ sbbl %eax, -104(%ebp) # 4-byte Folded Spill
+ movl 36(%esi), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ sbbl %eax, -112(%ebp) # 4-byte Folded Spill
+ sbbl $0, -116(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ subl %eax, %ecx
+ movl 44(%esi), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 48(%esi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ sbbl %eax, -120(%ebp) # 4-byte Folded Spill
+ movl 52(%esi), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ sbbl %eax, %edx
+ movl 56(%esi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl 60(%esi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ sbbl %eax, -100(%ebp) # 4-byte Folded Spill
+ movl 64(%esi), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ sbbl %eax, -96(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ sbbl %eax, -108(%ebp) # 4-byte Folded Spill
+ movl 72(%esi), %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ sbbl %eax, -104(%ebp) # 4-byte Folded Spill
+ movl 76(%esi), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ sbbl %eax, -112(%ebp) # 4-byte Folded Spill
+ sbbl $0, -116(%ebp) # 4-byte Folded Spill
+ addl -124(%ebp), %ecx # 4-byte Folded Reload
+ adcl -128(%ebp), %ebx # 4-byte Folded Reload
+ movl %ecx, 20(%esi)
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl -132(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 24(%esi)
+ adcl -136(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ adcl -140(%ebp), %edi # 4-byte Folded Reload
+ movl %edx, 32(%esi)
+ movl -100(%ebp), %eax # 4-byte Reload
+ adcl -160(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 36(%esi)
+ movl -96(%ebp), %ecx # 4-byte Reload
+ adcl -164(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%esi)
+ movl -108(%ebp), %eax # 4-byte Reload
+ adcl -168(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 44(%esi)
+ movl -104(%ebp), %ecx # 4-byte Reload
+ adcl -172(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl -112(%ebp), %edx # 4-byte Reload
+ adcl -176(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl -180(%ebp), %eax # 4-byte Folded Reload
+ movl %edx, 56(%esi)
+ movl %eax, 60(%esi)
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 64(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 68(%esi)
+ movl -152(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 72(%esi)
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 76(%esi)
+ addl $188, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end144:
+ .size mcl_fpDbl_mulPre10L, .Lfunc_end144-mcl_fpDbl_mulPre10L
+
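+# mcl_fpDbl_sqrPre10L: 20-limb square of a 10-limb operand, using the same
+# three-way split into 5-limb mcl_fpDbl_mulPre5L calls as mcl_fpDbl_mulPre10L.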
+ .globl mcl_fpDbl_sqrPre10L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre10L,@function
+mcl_fpDbl_sqrPre10L: # @mcl_fpDbl_sqrPre10L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $188, %esp
+ calll .L145$pb
+.L145$pb:
+ popl %ebx
+.Ltmp16:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp16-.L145$pb), %ebx
+ movl %ebx, -120(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre5L@PLT
+ leal 20(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 40(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre5L@PLT
+ movl 36(%edi), %eax
+ movl 32(%edi), %ebx
+ movl 28(%edi), %esi
+ movl (%edi), %ecx
+ movl 4(%edi), %edx
+ addl 20(%edi), %ecx
+ adcl 24(%edi), %edx
+ adcl 8(%edi), %esi
+ adcl 12(%edi), %ebx
+ movl %ebx, -124(%ebp) # 4-byte Spill
+ adcl 16(%edi), %eax
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -128(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -108(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -104(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -100(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -96(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ sbbl %ebx, %ebx
+ movl %ebx, -116(%ebp) # 4-byte Spill
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_1
+# BB#2:
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_3
+.LBB145_1:
+ leal (%ecx,%ecx), %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+.LBB145_3:
+ movl -96(%ebp), %edi # 4-byte Reload
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ movl -124(%ebp), %edi # 4-byte Reload
+ jb .LBB145_4
+# BB#5:
+ movl $0, -96(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_6
+.LBB145_4:
+ movl %edx, %ebx
+ shldl $1, %ecx, %ebx
+ movl %ebx, -96(%ebp) # 4-byte Spill
+.LBB145_6:
+ movl -100(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_7
+# BB#8:
+ movl $0, -100(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_9
+.LBB145_7:
+ movl %esi, %ebx
+ shldl $1, %edx, %ebx
+ movl %ebx, -100(%ebp) # 4-byte Spill
+.LBB145_9:
+ movl -104(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_10
+# BB#11:
+ movl $0, -104(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_12
+.LBB145_10:
+ movl %edi, %ebx
+ shldl $1, %esi, %ebx
+ movl %ebx, -104(%ebp) # 4-byte Spill
+.LBB145_12:
+ movl -108(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_13
+# BB#14:
+ movl $0, -108(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_15
+.LBB145_13:
+ movl %eax, %ebx
+ shldl $1, %edi, %ebx
+ movl %ebx, -108(%ebp) # 4-byte Spill
+.LBB145_15:
+ movl %ecx, -72(%ebp)
+ movl %edx, -68(%ebp)
+ movl %esi, -64(%ebp)
+ movl %edi, -60(%ebp)
+ movl %eax, -56(%ebp)
+ movl %ecx, -92(%ebp)
+ movl %edx, -88(%ebp)
+ movl %esi, -84(%ebp)
+ movl %edi, -80(%ebp)
+ movl %eax, -76(%ebp)
+ movl -128(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_16
+# BB#17:
+ movl $0, -124(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_18
+.LBB145_16:
+ shrl $31, %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+.LBB145_18:
+ leal -52(%ebp), %eax
+ movl %eax, (%esp)
+ leal -72(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -92(%ebp), %eax
+ movl %eax, 8(%esp)
+ movl -116(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -120(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre5L@PLT
+ movl -112(%ebp), %edi # 4-byte Reload
+ addl -32(%ebp), %edi
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl -100(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ movl -104(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ movl -108(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -108(%ebp) # 4-byte Spill
+ adcl -124(%ebp), %esi # 4-byte Folded Reload
+ movl -52(%ebp), %edx
+ movl 8(%ebp), %eax
+ subl (%eax), %edx
+ movl -48(%ebp), %ebx
+ sbbl 4(%eax), %ebx
+ movl -44(%ebp), %ecx
+ sbbl 8(%eax), %ecx
+ movl %ecx, -116(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ecx
+ sbbl 12(%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -36(%ebp), %ecx
+ sbbl 16(%eax), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ movl 20(%eax), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 28(%eax), %ecx
+ movl %ecx, -132(%ebp) # 4-byte Spill
+ sbbl %ecx, -100(%ebp) # 4-byte Folded Spill
+ movl 32(%eax), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ sbbl %ecx, -104(%ebp) # 4-byte Folded Spill
+ movl 36(%eax), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ sbbl %ecx, -108(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ movl 40(%eax), %ecx
+ movl %ecx, -160(%ebp) # 4-byte Spill
+ subl %ecx, %edx
+ movl 44(%eax), %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 48(%eax), %ecx
+ movl %ecx, -168(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 52(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl -144(%ebp), %edi # 4-byte Reload
+ sbbl %ecx, %edi
+ movl 56(%eax), %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 60(%eax), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 64(%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 68(%eax), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ sbbl %ecx, -100(%ebp) # 4-byte Folded Spill
+ movl 72(%eax), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ sbbl %ecx, -104(%ebp) # 4-byte Folded Spill
+ movl 76(%eax), %ecx
+ movl %ecx, -156(%ebp) # 4-byte Spill
+ sbbl %ecx, -108(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ addl -124(%ebp), %edx # 4-byte Folded Reload
+ adcl -128(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 20(%eax)
+ movl -116(%ebp), %ecx # 4-byte Reload
+ adcl -132(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 24(%eax)
+ adcl -136(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 28(%eax)
+ movl -120(%ebp), %edx # 4-byte Reload
+ adcl -140(%ebp), %edx # 4-byte Folded Reload
+ movl %edi, 32(%eax)
+ movl -112(%ebp), %ecx # 4-byte Reload
+ adcl -160(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 36(%eax)
+ movl -96(%ebp), %edx # 4-byte Reload
+ adcl -164(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 40(%eax)
+ movl -100(%ebp), %ecx # 4-byte Reload
+ adcl -168(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 44(%eax)
+ movl -104(%ebp), %edx # 4-byte Reload
+ adcl -172(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 48(%eax)
+ movl -108(%ebp), %ecx # 4-byte Reload
+ adcl -176(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 52(%eax)
+ adcl -180(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 56(%eax)
+ movl %esi, 60(%eax)
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 64(%eax)
+ movl -148(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 68(%eax)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 72(%eax)
+ movl -156(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 76(%eax)
+ addl $188, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end145:
+ .size mcl_fpDbl_sqrPre10L, .Lfunc_end145-mcl_fpDbl_sqrPre10L
+
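+# mcl_fp_mont10L: word-serial Montgomery multiplication of two 10-limb
+# operands; each round calls .LmulPv320x32 once for the next multiplier word
+# and once for the reduction multiple derived from modulus[-1].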
+ .globl mcl_fp_mont10L
+ .align 16, 0x90
+ .type mcl_fp_mont10L,@function
+mcl_fp_mont10L: # @mcl_fp_mont10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1036, %esp # imm = 0x40C
+ calll .L146$pb
+.L146$pb:
+ popl %ebx
+.Ltmp17:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp17-.L146$pb), %ebx
+ movl 1068(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 992(%esp), %edi
+ movl 996(%esp), %ebp
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1032(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1028(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1024(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1020(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1016(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1012(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1008(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1004(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1000(%esp), %esi
+ movl %eax, (%esp)
+ leal 944(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ addl 944(%esp), %edi
+ adcl 948(%esp), %ebp
+ adcl 952(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 1064(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 896(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ addl 896(%esp), %ebp
+ adcl 900(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 936(%esp), %edi
+ sbbl %eax, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 848(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ movl 64(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 848(%esp), %ebp
+ adcl 852(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 856(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 860(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 864(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 868(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 872(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 876(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 880(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 884(%esp), %ebp
+ adcl 888(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 800(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ addl 800(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 832(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 836(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %esi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1068(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 752(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 780(%esp), %esi
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 784(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 704(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 716(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 728(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 732(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 736(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 744(%esp), %edi
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 656(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %esi
+ movl %esi, %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ addl 656(%esp), %eax
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 676(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 688(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 696(%esp), %edi
+ adcl $0, %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 608(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 624(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 636(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 640(%esp), %esi
+ adcl 644(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 648(%esp), %edi
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 560(%esp), %eax
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 572(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 592(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 600(%esp), %edi
+ adcl $0, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 512(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 520(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 548(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 464(%esp), %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 472(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 484(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 496(%esp), %ebp
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 504(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 416(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 432(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 444(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 452(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 368(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 380(%esp), %esi
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 384(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 400(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 320(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 320(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 328(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 332(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 348(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 360(%esp), %ebp
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 272(%esp), %esi
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 276(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 288(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 312(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl $0, %ebp
+ movl 1064(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl %edi, %ecx
+ addl 224(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 236(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 240(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 264(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %ebp
+ addl 176(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 192(%esp), %esi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 196(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 1064(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 128(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 140(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ adcl 144(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 168(%esp), %ebp
+ sbbl %esi, %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 80(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %esi
+ addl 80(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 84(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 92(%esp), %ebx
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 120(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl 1068(%esp), %edx
+ subl (%edx), %eax
+ sbbl 4(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %ebx, %ecx
+ sbbl 8(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 12(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ sbbl 16(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ sbbl 20(%edx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ sbbl 24(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edx), %ecx
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 32(%edx), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ sbbl 36(%edx), %ebp
+ movl %ebp, %edx
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB146_2
+# BB#1:
+ movl %ecx, 48(%esp) # 4-byte Spill
+.LBB146_2:
+ movl %esi, %ecx
+ testb %cl, %cl
+ movl 76(%esp), %esi # 4-byte Reload
+ jne .LBB146_4
+# BB#3:
+ movl %eax, %esi
+.LBB146_4:
+ movl 1056(%esp), %eax
+ movl %esi, (%eax)
+ movl 60(%esp), %edi # 4-byte Reload
+ jne .LBB146_6
+# BB#5:
+ movl 16(%esp), %edi # 4-byte Reload
+.LBB146_6:
+ movl %edi, 4(%eax)
+ jne .LBB146_8
+# BB#7:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB146_8:
+ movl %ebx, 8(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB146_10
+# BB#9:
+ movl 24(%esp), %ebp # 4-byte Reload
+.LBB146_10:
+ movl %ebp, 12(%eax)
+ jne .LBB146_12
+# BB#11:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB146_12:
+ movl %ecx, 16(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB146_14
+# BB#13:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB146_14:
+ movl %ecx, 20(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB146_16
+# BB#15:
+ movl 56(%esp), %ecx # 4-byte Reload
+.LBB146_16:
+ movl %ecx, 24(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB146_18
+# BB#17:
+ movl 64(%esp), %ecx # 4-byte Reload
+.LBB146_18:
+ movl %ecx, 32(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB146_20
+# BB#19:
+ movl %edx, %ecx
+.LBB146_20:
+ movl %ecx, 36(%eax)
+ addl $1036, %esp # imm = 0x40C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end146:
+ .size mcl_fp_mont10L, .Lfunc_end146-mcl_fp_mont10L
+
+ .globl mcl_fp_montNF10L
+ .align 16, 0x90
+ .type mcl_fp_montNF10L,@function
+mcl_fp_montNF10L: # @mcl_fp_montNF10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1020, %esp # imm = 0x3FC
+ calll .L147$pb
+.L147$pb:
+ popl %ebx
+.Ltmp18:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp18-.L147$pb), %ebx
+ movl 1052(%esp), %eax
+ movl -4(%eax), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 976(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 976(%esp), %edi
+ movl 980(%esp), %esi
+ movl %edi, %eax
+ imull %ebp, %eax
+ movl 1016(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1012(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1008(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1004(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1000(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 996(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 992(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 988(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 984(%esp), %ebp
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 928(%esp), %edi
+ adcl 932(%esp), %esi
+ adcl 936(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 952(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 920(%esp), %ecx
+ addl 880(%esp), %esi
+ adcl 884(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 900(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 832(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 832(%esp), %esi
+ adcl 836(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 848(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 872(%esp), %esi
+ movl 1048(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 824(%esp), %ecx
+ addl 784(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 796(%esp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 808(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 820(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 736(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 736(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 760(%esp), %edi
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 764(%esp), %ebp
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 768(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 1044(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ movl 728(%esp), %eax
+ movl 28(%esp), %edx # 4-byte Reload
+ addl 688(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 704(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 708(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 712(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 716(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 720(%esp), %ebp
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 724(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 1052(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ addl 640(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 656(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 672(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 676(%esp), %esi
+ movl %esi, %ebp
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 680(%esp), %esi
+ movl 1048(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 632(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 592(%esp), %ecx
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 604(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 624(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 628(%esp), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 544(%esp), %esi
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 548(%esp), %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 552(%esp), %esi
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 556(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 536(%esp), %edx
+ addl 496(%esp), %edi
+ adcl 500(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 504(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 528(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 448(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 448(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 456(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 464(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 480(%esp), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %esi # 4-byte Reload
+ adcl 488(%esp), %esi
+ movl 1048(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 400(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 440(%esp), %eax
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 400(%esp), %ecx
+ adcl 404(%esp), %ebp
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 408(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 412(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 416(%esp), %edi
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 420(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 424(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 428(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 432(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 436(%esp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 352(%esp), %esi
+ adcl 356(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 360(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 368(%esp), %esi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 372(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1044(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ movl 344(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 304(%esp), %ecx
+ adcl 308(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 316(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 320(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 324(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 256(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 272(%esp), %edi
+ adcl 276(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 284(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %esi # 4-byte Reload
+ adcl 288(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 248(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 208(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 220(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 224(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 236(%esp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 160(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ adcl 176(%esp), %edi
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 180(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 192(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 152(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 112(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 124(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 128(%esp), %esi
+ movl %esi, %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 140(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 144(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %ebp
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 64(%esp), %ebp
+ movl %edi, %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 68(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebx
+ adcl 80(%esp), %ebp
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 96(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1052(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %ecx
+ movl %ebx, %eax
+ sbbl 8(%edi), %eax
+ movl %ebp, %esi
+ sbbl 12(%edi), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 16(%edi), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 24(%esp), %esi # 4-byte Reload
+ sbbl 20(%edi), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ sbbl 24(%edi), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 28(%edi), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 32(%edi), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 36(%edi), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %esi, %edi
+ sarl $31, %edi
+ testl %edi, %edi
+ movl 60(%esp), %edi # 4-byte Reload
+ js .LBB147_2
+# BB#1:
+ movl %edx, %edi
+.LBB147_2:
+ movl 1040(%esp), %edx
+ movl %edi, (%edx)
+ movl 52(%esp), %edi # 4-byte Reload
+ js .LBB147_4
+# BB#3:
+ movl %ecx, %edi
+.LBB147_4:
+ movl %edi, 4(%edx)
+ js .LBB147_6
+# BB#5:
+ movl %eax, %ebx
+.LBB147_6:
+ movl %ebx, 8(%edx)
+ js .LBB147_8
+# BB#7:
+ movl 4(%esp), %ebp # 4-byte Reload
+.LBB147_8:
+ movl %ebp, 12(%edx)
+ movl 44(%esp), %esi # 4-byte Reload
+ movl 24(%esp), %eax # 4-byte Reload
+ js .LBB147_10
+# BB#9:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB147_10:
+ movl %esi, 16(%edx)
+ js .LBB147_12
+# BB#11:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB147_12:
+ movl %eax, 20(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB147_14
+# BB#13:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB147_14:
+ movl %eax, 24(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB147_16
+# BB#15:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB147_16:
+ movl %eax, 28(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB147_18
+# BB#17:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB147_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB147_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB147_20:
+ movl %eax, 36(%edx)
+ addl $1020, %esp # imm = 0x3FC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end147:
+ .size mcl_fp_montNF10L, .Lfunc_end147-mcl_fp_montNF10L
+
+ .globl mcl_fp_montRed10L
+ .align 16, 0x90
+ .type mcl_fp_montRed10L,@function
+mcl_fp_montRed10L: # @mcl_fp_montRed10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $604, %esp # imm = 0x25C
+ calll .L148$pb
+.L148$pb:
+ popl %eax
+.Ltmp19:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp19-.L148$pb), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 632(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 628(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 76(%ecx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 56(%ecx), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 48(%ecx), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 44(%ecx), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 40(%ecx), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 28(%ecx), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 24(%ecx), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 20(%ecx), %ebp
+ movl 16(%ecx), %edi
+ movl 12(%ecx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 8(%ecx), %esi
+ movl (%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 560(%esp), %ecx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 560(%esp), %eax
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 564(%esp), %ecx
+ adcl 568(%esp), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 576(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 580(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 68(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 512(%esp), %esi
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 516(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 520(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 524(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 528(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 532(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 536(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 540(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 544(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 548(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 552(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 464(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 492(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 416(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 440(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ movl 60(%esp), %edi # 4-byte Reload
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 632(%esp), %eax
+ movl %eax, %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 368(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 404(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 320(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 320(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %edi # 4-byte Reload
+ adcl 344(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 352(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 360(%esp), %esi
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 272(%esp), %ebp
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 276(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 288(%esp), %ebp
+ adcl 292(%esp), %edi
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 296(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 308(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 312(%esp), %esi
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 96(%esp), %eax # 4-byte Reload
+ addl 224(%esp), %eax
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 232(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 236(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 240(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl 244(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 248(%esp), %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 256(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 260(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 264(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %eax, %edi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 176(%esp), %edi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 180(%esp), %ecx
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl 184(%esp), %edi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 196(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 128(%esp), %esi
+ movl %edi, %eax
+ adcl 132(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 140(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ adcl 144(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 76(%esp), %ebx # 4-byte Reload
+ adcl 164(%esp), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
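+# Reduction rounds finished: subtract the modulus limbs spilled earlier on the stack; the final borrow (in %eax below) decides whether the original or the subtracted limbs are written to the output.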
+ subl 12(%esp), %edi # 4-byte Folded Reload
+ sbbl 8(%esp), %ecx # 4-byte Folded Reload
+ sbbl 16(%esp), %esi # 4-byte Folded Reload
+ sbbl 20(%esp), %edx # 4-byte Folded Reload
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ sbbl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx # 4-byte Reload
+ sbbl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 96(%esp) # 4-byte Spill
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB148_2
+# BB#1:
+ movl %edx, 80(%esp) # 4-byte Spill
+.LBB148_2:
+ testb %al, %al
+ movl 112(%esp), %edx # 4-byte Reload
+ jne .LBB148_4
+# BB#3:
+ movl %edi, %edx
+.LBB148_4:
+ movl 624(%esp), %edi
+ movl %edx, (%edi)
+ movl 108(%esp), %edx # 4-byte Reload
+ jne .LBB148_6
+# BB#5:
+ movl %ecx, 124(%esp) # 4-byte Spill
+.LBB148_6:
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%edi)
+ movl 116(%esp), %ecx # 4-byte Reload
+ jne .LBB148_8
+# BB#7:
+ movl %esi, %ecx
+.LBB148_8:
+ movl %ecx, 8(%edi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edi)
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl 120(%esp), %eax # 4-byte Reload
+ jne .LBB148_10
+# BB#9:
+ movl 64(%esp), %eax # 4-byte Reload
+.LBB148_10:
+ movl %eax, 16(%edi)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl 104(%esp), %ebp # 4-byte Reload
+ jne .LBB148_12
+# BB#11:
+ movl 68(%esp), %ebp # 4-byte Reload
+.LBB148_12:
+ movl %ebp, 20(%edi)
+ movl 88(%esp), %ebx # 4-byte Reload
+ jne .LBB148_14
+# BB#13:
+ movl 72(%esp), %ebx # 4-byte Reload
+.LBB148_14:
+ movl %ebx, 24(%edi)
+ jne .LBB148_16
+# BB#15:
+ movl 92(%esp), %edx # 4-byte Reload
+.LBB148_16:
+ movl %edx, 28(%edi)
+ jne .LBB148_18
+# BB#17:
+ movl 100(%esp), %ecx # 4-byte Reload
+.LBB148_18:
+ movl %ecx, 32(%edi)
+ jne .LBB148_20
+# BB#19:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB148_20:
+ movl %eax, 36(%edi)
+ addl $604, %esp # imm = 0x25C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end148:
+ .size mcl_fp_montRed10L, .Lfunc_end148-mcl_fp_montRed10L
+
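+# mcl_fp_addPre10L: 10-limb (320-bit) addition with no modular reduction; limbs are added with an adcl chain, the sum is stored to the first argument, and the carry-out is returned in %eax.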
+ .globl mcl_fp_addPre10L
+ .align 16, 0x90
+ .type mcl_fp_addPre10L,@function
+mcl_fp_addPre10L: # @mcl_fp_addPre10L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl %edx, 28(%ebx)
+ movl %esi, 32(%ebx)
+ movl 36(%eax), %eax
+ movl 36(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 36(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end149:
+ .size mcl_fp_addPre10L, .Lfunc_end149-mcl_fp_addPre10L
+
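+# mcl_fp_subPre10L: 10-limb subtraction with no modular reduction; the sbbl chain propagates borrows and the final borrow is returned in %eax.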
+ .globl mcl_fp_subPre10L
+ .align 16, 0x90
+ .type mcl_fp_subPre10L,@function
+mcl_fp_subPre10L: # @mcl_fp_subPre10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl %esi, 28(%ebp)
+ movl %edi, 32(%ebp)
+ movl 36(%edx), %edx
+ movl 36(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 36(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end150:
+ .size mcl_fp_subPre10L, .Lfunc_end150-mcl_fp_subPre10L
+
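+# mcl_fp_shr1_10L: right-shift a 10-limb value by one bit; shrdl funnels each limb's low bit into the next lower limb, and the top limb gets a plain shrl.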
+ .globl mcl_fp_shr1_10L
+ .align 16, 0x90
+ .type mcl_fp_shr1_10L,@function
+mcl_fp_shr1_10L: # @mcl_fp_shr1_10L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 24(%esi)
+ movl 32(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 28(%esi)
+ movl 36(%eax), %eax
+ shrdl $1, %eax, %ecx
+ movl %ecx, 32(%esi)
+ shrl %eax
+ movl %eax, 36(%esi)
+ popl %esi
+ retl
+.Lfunc_end151:
+ .size mcl_fp_shr1_10L, .Lfunc_end151-mcl_fp_shr1_10L
+
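+# mcl_fp_add10L: modular addition; the raw 10-limb sum is stored, then the modulus (fourth pointer argument) is subtracted and the reduced limbs are stored only on the %nocarry path, i.e. when no borrow remains.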
+ .globl mcl_fp_add10L
+ .align 16, 0x90
+ .type mcl_fp_add10L,@function
+mcl_fp_add10L: # @mcl_fp_add10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+ movl 52(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 48(%esp), %ebx
+ addl (%ebx), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ adcl 4(%ebx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %eax
+ adcl 12(%edi), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 16(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ adcl 20(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%ebx), %esi
+ adcl 24(%edi), %esi
+ movl 28(%ebx), %ebp
+ adcl 28(%edi), %ebp
+ movl 32(%ebx), %edx
+ adcl 32(%edi), %edx
+ movl 36(%ebx), %ecx
+ adcl 36(%edi), %ecx
+ movl 44(%esp), %edi
+ movl (%esp), %ebx # 4-byte Reload
+ movl %ebx, (%edi)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%edi)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edi)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%edi)
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%edi)
+ movl %esi, 24(%edi)
+ movl %ebp, 28(%edi)
+ movl %edx, 32(%edi)
+ movl %ecx, 36(%edi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 56(%esp), %edi
+ subl (%edi), %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl 20(%esp), %ebx # 4-byte Reload
+ sbbl 4(%edi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %ebx # 4-byte Reload
+ sbbl 8(%edi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ sbbl 12(%edi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebx # 4-byte Reload
+ sbbl 16(%edi), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 4(%esp), %ebx # 4-byte Reload
+ sbbl 20(%edi), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ sbbl 24(%edi), %esi
+ sbbl 28(%edi), %ebp
+ sbbl 32(%edi), %edx
+ sbbl 36(%edi), %ecx
+ sbbl $0, %eax
+ testb $1, %al
+ jne .LBB152_2
+# BB#1: # %nocarry
+ movl (%esp), %edi # 4-byte Reload
+ movl 44(%esp), %ebx
+ movl %edi, (%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebx)
+ movl %esi, 24(%ebx)
+ movl %ebp, 28(%ebx)
+ movl %edx, 32(%ebx)
+ movl %ecx, 36(%ebx)
+.LBB152_2: # %carry
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end152:
+ .size mcl_fp_add10L, .Lfunc_end152-mcl_fp_add10L
+
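+# mcl_fp_addNF10L: branch-light modular addition; after the add it subtracts the modulus and uses the sign of the top difference limb (sarl $31 / js) to select, limb by limb, between the raw and the reduced sum.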
+ .globl mcl_fp_addNF10L
+ .align 16, 0x90
+ .type mcl_fp_addNF10L,@function
+mcl_fp_addNF10L: # @mcl_fp_addNF10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 100(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %esi
+ movl 96(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 4(%edx), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 36(%ecx), %edi
+ movl 32(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 20(%ecx), %ebp
+ movl 16(%ecx), %ebx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %esi
+ adcl 8(%edx), %esi
+ adcl 12(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 16(%edx), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 20(%edx), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 24(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 28(%edx), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 32(%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ adcl 36(%edx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 104(%esp), %edi
+ movl 52(%esp), %edx # 4-byte Reload
+ subl (%edi), %edx
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 4(%edi), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl %ecx, %esi
+ sbbl 8(%edi), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ sbbl 20(%edi), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ movl %esi, %eax
+ movl %esi, %ebp
+ sbbl 24(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ movl %esi, %eax
+ movl %esi, %ebx
+ sbbl 32(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, %esi
+ sbbl 36(%edi), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %esi, %edi
+ movl 52(%esp), %esi # 4-byte Reload
+ sarl $31, %edi
+ testl %edi, %edi
+ js .LBB153_2
+# BB#1:
+ movl %edx, %esi
+.LBB153_2:
+ movl 92(%esp), %edx
+ movl %esi, (%edx)
+ movl 56(%esp), %esi # 4-byte Reload
+ js .LBB153_4
+# BB#3:
+ movl (%esp), %esi # 4-byte Reload
+.LBB153_4:
+ movl %esi, 4(%edx)
+ movl %ebp, %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ js .LBB153_6
+# BB#5:
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB153_6:
+ movl %ecx, 8(%edx)
+ movl %ebx, %ecx
+ movl 44(%esp), %ebp # 4-byte Reload
+ js .LBB153_8
+# BB#7:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB153_8:
+ movl %esi, 12(%edx)
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 48(%esp), %ebx # 4-byte Reload
+ js .LBB153_10
+# BB#9:
+ movl 12(%esp), %ebp # 4-byte Reload
+.LBB153_10:
+ movl %ebp, 16(%edx)
+ js .LBB153_12
+# BB#11:
+ movl 16(%esp), %ebx # 4-byte Reload
+.LBB153_12:
+ movl %ebx, 20(%edx)
+ js .LBB153_14
+# BB#13:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB153_14:
+ movl %edi, 24(%edx)
+ js .LBB153_16
+# BB#15:
+ movl 24(%esp), %esi # 4-byte Reload
+.LBB153_16:
+ movl %esi, 28(%edx)
+ js .LBB153_18
+# BB#17:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB153_18:
+ movl %ecx, 32(%edx)
+ js .LBB153_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB153_20:
+ movl %eax, 36(%edx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end153:
+ .size mcl_fp_addNF10L, .Lfunc_end153-mcl_fp_addNF10L
+
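+# mcl_fp_sub10L: modular subtraction; the raw difference is stored, and if a borrow occurred the %carry block adds the modulus back in before returning.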
+ .globl mcl_fp_sub10L
+ .align 16, 0x90
+ .type mcl_fp_sub10L,@function
+mcl_fp_sub10L: # @mcl_fp_sub10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ xorl %ebx, %ebx
+ movl 60(%esp), %edi
+ subl (%edi), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 20(%esi), %edx
+ sbbl 20(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 24(%esi), %ecx
+ sbbl 24(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 32(%esi), %ebp
+ sbbl 32(%edi), %ebp
+ movl 36(%esi), %esi
+ sbbl 36(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 52(%esp), %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl %edx, 20(%ebx)
+ movl %ecx, 24(%ebx)
+ movl %eax, 28(%ebx)
+ movl %ebp, 32(%ebx)
+ movl %esi, 36(%ebx)
+ je .LBB154_2
+# BB#1: # %carry
+ movl %esi, %edi
+ movl 64(%esp), %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl %eax, 28(%ebx)
+ movl 32(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 32(%ebx)
+ movl 36(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 36(%ebx)
+.LBB154_2: # %nocarry
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end154:
+ .size mcl_fp_sub10L, .Lfunc_end154-mcl_fp_sub10L
+
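+# mcl_fp_subNF10L: branchless modular subtraction; the sign of the top difference limb is broadcast into masks that select either zero or the modulus limbs, which are then added back.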
+ .globl mcl_fp_subNF10L
+ .align 16, 0x90
+ .type mcl_fp_subNF10L,@function
+mcl_fp_subNF10L: # @mcl_fp_subNF10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 80(%esp), %eax
+ movl 36(%eax), %esi
+ movl (%eax), %edi
+ movl 4(%eax), %edx
+ movl 84(%esp), %ecx
+ subl (%ecx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ sbbl 4(%ecx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 32(%eax), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl 24(%eax), %ebx
+ movl 20(%eax), %ebp
+ movl 16(%eax), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 12(%eax), %edx
+ movl 8(%eax), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 16(%ecx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ sbbl 24(%ecx), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ sbbl 28(%ecx), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ sbbl 32(%ecx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ sbbl 36(%ecx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ sarl $31, %eax
+ movl %eax, %edx
+ addl %edx, %edx
+ movl %eax, %ecx
+ adcl %ecx, %ecx
+ movl %esi, %ebx
+ shrl $31, %ebx
+ orl %edx, %ebx
+ movl 88(%esp), %edi
+ movl 20(%edi), %edx
+ andl %ecx, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 12(%edi), %edx
+ andl %ecx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ andl 4(%edi), %ecx
+ movl 16(%edi), %edx
+ andl %ebx, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 8(%edi), %edx
+ andl %ebx, %edx
+ andl (%edi), %ebx
+ movl 36(%edi), %esi
+ andl %eax, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 32(%edi), %ebp
+ andl %eax, %ebp
+ movl 28(%edi), %esi
+ andl %eax, %esi
+ andl 24(%edi), %eax
+ addl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %edi
+ movl %ebx, (%edi)
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 4(%edi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 8(%edi)
+ movl (%esp), %edx # 4-byte Reload
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 12(%edi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 16(%edi)
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 20(%edi)
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 24(%edi)
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %esi, 28(%edi)
+ movl %ebp, 32(%edi)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%edi)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end155:
+ .size mcl_fp_subNF10L, .Lfunc_end155-mcl_fp_subNF10L
+
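+# mcl_fpDbl_add10L: adds two 20-limb double-width values; the low ten limbs are stored as they are produced, while the high half is conditionally reduced by the modulus before being written out.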
+ .globl mcl_fpDbl_add10L
+ .align 16, 0x90
+ .type mcl_fpDbl_add10L,@function
+mcl_fpDbl_add10L: # @mcl_fpDbl_add10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 100(%esp), %edx
+ movl 96(%esp), %edi
+ movl 12(%edi), %esi
+ movl 16(%edi), %ecx
+ movl 8(%edx), %ebx
+ movl (%edx), %ebp
+ addl (%edi), %ebp
+ movl 92(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%edx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%edx), %esi
+ adcl 16(%edx), %ecx
+ movl %ebp, 4(%eax)
+ movl 48(%edx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %ecx, 16(%eax)
+ movl 24(%edi), %ecx
+ adcl %ebx, %ecx
+ movl 28(%edx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %ecx, 24(%eax)
+ movl 32(%edi), %ecx
+ adcl %ebx, %ecx
+ movl 36(%edx), %ebx
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %ecx, 32(%eax)
+ movl 40(%edi), %ecx
+ adcl %ebx, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 44(%edx), %ebx
+ movl %esi, 36(%eax)
+ movl 44(%edi), %eax
+ adcl %ebx, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 48(%edi), %eax
+ adcl %ebp, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ movl 52(%edi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 56(%edx), %eax
+ movl 56(%edi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 60(%edx), %eax
+ movl 60(%edi), %ecx
+ adcl %eax, %ecx
+ movl 64(%edx), %esi
+ movl 64(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 68(%edx), %ebx
+ movl 68(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 72(%edx), %ebx
+ movl 72(%edi), %ebp
+ adcl %ebx, %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 76(%edx), %edx
+ movl 76(%edi), %edi
+ adcl %edx, %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 104(%esp), %ebx
+ movl 64(%esp), %edi # 4-byte Reload
+ subl (%ebx), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebx), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebx), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ sbbl 20(%ebx), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 24(%ebx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ sbbl 28(%ebx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl 36(%esp), %ebp # 4-byte Reload
+ sbbl 32(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ sbbl 36(%ebx), %edi
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB156_2
+# BB#1:
+ movl %edi, %ebp
+.LBB156_2:
+ testb %dl, %dl
+ movl 64(%esp), %edx # 4-byte Reload
+ movl 60(%esp), %esi # 4-byte Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ jne .LBB156_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB156_4:
+ movl 92(%esp), %eax
+ movl %edx, 40(%eax)
+ movl 68(%esp), %edx # 4-byte Reload
+ movl %edx, 44(%eax)
+ movl %ebx, 48(%eax)
+ movl %edi, 52(%eax)
+ movl %esi, 56(%eax)
+ movl %ecx, 60(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB156_6
+# BB#5:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB156_6:
+ movl %ecx, 64(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB156_8
+# BB#7:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB156_8:
+ movl %edx, 68(%eax)
+ jne .LBB156_10
+# BB#9:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB156_10:
+ movl %ecx, 72(%eax)
+ movl %ebp, 76(%eax)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end156:
+ .size mcl_fpDbl_add10L, .Lfunc_end156-mcl_fpDbl_add10L
+
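+# mcl_fpDbl_sub10L: subtracts two 20-limb double-width values; the low ten limbs are stored directly, and if the high half underflows, modulus limbs (or zeros) are selected and added back before storing the upper ten limbs.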
+ .globl mcl_fpDbl_sub10L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub10L,@function
+mcl_fpDbl_sub10L: # @mcl_fpDbl_sub10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %ebp
+ movl (%ebp), %edx
+ movl 4(%ebp), %esi
+ movl 88(%esp), %eax
+ subl (%eax), %edx
+ sbbl 4(%eax), %esi
+ movl 8(%ebp), %edi
+ sbbl 8(%eax), %edi
+ movl 80(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 12(%ebp), %edx
+ sbbl 12(%eax), %edx
+ movl %esi, 4(%ecx)
+ movl 16(%ebp), %esi
+ sbbl 16(%eax), %esi
+ movl %edi, 8(%ecx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ecx)
+ movl 20(%ebp), %edx
+ sbbl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ecx)
+ movl 24(%ebp), %esi
+ sbbl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ecx)
+ movl 28(%ebp), %edx
+ sbbl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ecx)
+ movl 32(%ebp), %esi
+ sbbl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ecx)
+ movl 36(%ebp), %edx
+ sbbl %edi, %edx
+ movl 40(%eax), %edi
+ movl %esi, 32(%ecx)
+ movl 40(%ebp), %esi
+ sbbl %edi, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 44(%eax), %esi
+ movl %edx, 36(%ecx)
+ movl 44(%ebp), %edx
+ sbbl %esi, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%eax), %edx
+ movl 48(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 52(%eax), %edx
+ movl 52(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 56(%eax), %edx
+ movl 56(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 60(%eax), %edx
+ movl 60(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 64(%eax), %edx
+ movl 64(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 68(%eax), %edx
+ movl 68(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 72(%eax), %edx
+ movl 72(%ebp), %esi
+ sbbl %edx, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 76(%eax), %eax
+ movl 76(%ebp), %edx
+ sbbl %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 92(%esp), %esi
+ jne .LBB157_1
+# BB#2:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB157_3
+.LBB157_1:
+ movl 36(%esi), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+.LBB157_3:
+ testb %al, %al
+ jne .LBB157_4
+# BB#5:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ movl $0, %ebx
+ jmp .LBB157_6
+.LBB157_4:
+ movl (%esi), %ebx
+ movl 4(%esi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB157_6:
+ jne .LBB157_7
+# BB#8:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB157_9
+.LBB157_7:
+ movl 32(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB157_9:
+ jne .LBB157_10
+# BB#11:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB157_12
+.LBB157_10:
+ movl 28(%esi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB157_12:
+ jne .LBB157_13
+# BB#14:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB157_15
+.LBB157_13:
+ movl 24(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB157_15:
+ jne .LBB157_16
+# BB#17:
+ movl $0, %ebp
+ jmp .LBB157_18
+.LBB157_16:
+ movl 20(%esi), %ebp
+.LBB157_18:
+ jne .LBB157_19
+# BB#20:
+ movl $0, %eax
+ jmp .LBB157_21
+.LBB157_19:
+ movl 16(%esi), %eax
+.LBB157_21:
+ jne .LBB157_22
+# BB#23:
+ movl $0, %edx
+ jmp .LBB157_24
+.LBB157_22:
+ movl 12(%esi), %edx
+.LBB157_24:
+ jne .LBB157_25
+# BB#26:
+ xorl %esi, %esi
+ jmp .LBB157_27
+.LBB157_25:
+ movl 8(%esi), %esi
+.LBB157_27:
+ addl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 40(%ecx)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 44(%ecx)
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 52(%ecx)
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 56(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 60(%ecx)
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 68(%ecx)
+ movl %eax, 72(%ecx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%ecx)
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end157:
+ .size mcl_fpDbl_sub10L, .Lfunc_end157-mcl_fpDbl_sub10L
+
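+# .LmulPv352x32: internal helper with a private calling convention: %ecx = 12-limb result buffer, %edx = 11-limb operand, 32-bit multiplier on the caller's stack slot; each limb is multiplied with mull and the partial products are combined with an adcl carry chain.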
+ .align 16, 0x90
+ .type .LmulPv352x32,@function
+.LmulPv352x32: # @mulPv352x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl %edx, %ebx
+ movl 92(%esp), %edi
+ movl %edi, %eax
+ mull 40(%ebx)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 36(%ebx)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 32(%ebx)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 28(%ebx)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 24(%ebx)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 20(%ebx)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 16(%ebx)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 12(%ebx)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 8(%ebx)
+ movl %edx, %esi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 4(%ebx)
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull (%ebx)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%ecx)
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 44(%ecx)
+ movl %ecx, %eax
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end158:
+ .size .LmulPv352x32, .Lfunc_end158-.LmulPv352x32
+
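+# mcl_fp_mulUnitPre11L: multiplies an 11-limb operand by a single 32-bit unit via .LmulPv352x32 and copies the 12-limb product to the destination.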
+ .globl mcl_fp_mulUnitPre11L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre11L,@function
+mcl_fp_mulUnitPre11L: # @mcl_fp_mulUnitPre11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+ calll .L159$pb
+.L159$pb:
+ popl %ebx
+.Ltmp20:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp20-.L159$pb), %ebx
+ movl 120(%esp), %eax
+ movl %eax, (%esp)
+ leal 40(%esp), %ecx
+ movl 116(%esp), %edx
+ calll .LmulPv352x32
+ movl 84(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp
+ movl 56(%esp), %ebx
+ movl 52(%esp), %edi
+ movl 48(%esp), %esi
+ movl 40(%esp), %edx
+ movl 44(%esp), %ecx
+ movl 112(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end159:
+ .size mcl_fp_mulUnitPre11L, .Lfunc_end159-mcl_fp_mulUnitPre11L
+
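+# mcl_fpDbl_mulPre11L: schoolbook 11x11-limb multiplication; each iteration multiplies the first operand by one limb of the second via .LmulPv352x32 and accumulates the shifted partial product into the 22-limb result.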
+ .globl mcl_fpDbl_mulPre11L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre11L,@function
+mcl_fpDbl_mulPre11L: # @mcl_fpDbl_mulPre11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $620, %esp # imm = 0x26C
+ calll .L160$pb
+.L160$pb:
+ popl %eax
+.Ltmp21:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp21-.L160$pb), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 648(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 644(%esp), %edx
+ movl %edx, %ebp
+ movl %ebx, %edi
+ calll .LmulPv352x32
+ movl 612(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 584(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 580(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 568(%esp), %eax
+ movl 572(%esp), %esi
+ movl 640(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 648(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 520(%esp), %ecx
+ movl %ebp, %edx
+ movl %edi, %ebx
+ calll .LmulPv352x32
+ addl 520(%esp), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 540(%esp), %ebx
+ movl 536(%esp), %edi
+ movl 532(%esp), %esi
+ movl 524(%esp), %ecx
+ movl 528(%esp), %edx
+ movl 640(%esp), %eax
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 472(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 516(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 508(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 504(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 492(%esp), %ebp
+ movl 488(%esp), %edi
+ movl 484(%esp), %esi
+ movl 476(%esp), %ecx
+ movl 480(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 424(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 468(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 464(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 460(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 444(%esp), %ebx
+ movl 440(%esp), %edi
+ movl 436(%esp), %esi
+ movl 428(%esp), %ecx
+ movl 432(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 376(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 396(%esp), %ebp
+ movl 392(%esp), %edi
+ movl 388(%esp), %esi
+ movl 380(%esp), %ecx
+ movl 384(%esp), %edx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 328(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 348(%esp), %ebx
+ movl 344(%esp), %edi
+ movl 340(%esp), %esi
+ movl 332(%esp), %ecx
+ movl 336(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 280(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 300(%esp), %ebp
+ movl 296(%esp), %edi
+ movl 292(%esp), %esi
+ movl 284(%esp), %ecx
+ movl 288(%esp), %edx
+ movl 640(%esp), %eax
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 232(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 252(%esp), %ebx
+ movl 248(%esp), %edi
+ movl 244(%esp), %esi
+ movl 236(%esp), %ecx
+ movl 240(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 184(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 204(%esp), %ebp
+ movl 200(%esp), %edi
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 640(%esp), %eax
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %edi
+ movl 36(%edi), %eax
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 644(%esp), %eax
+ movl %eax, %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 136(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 164(%esp), %ebp
+ movl 160(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 156(%esp), %edi
+ movl 152(%esp), %esi
+ movl 148(%esp), %edx
+ movl 140(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 36(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 648(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 88(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 92(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 112(%esp), %edi
+ movl 108(%esp), %esi
+ movl 104(%esp), %edx
+ movl 100(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 40(%eax)
+ movl %ebp, 44(%eax)
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%eax)
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 60(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 64(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ movl %ecx, 76(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ addl $620, %esp # imm = 0x26C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end160:
+ .size mcl_fpDbl_mulPre11L, .Lfunc_end160-mcl_fpDbl_mulPre11L
+
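+# mcl_fpDbl_sqrPre11L: squaring counterpart of mcl_fpDbl_mulPre11L; it reuses .LmulPv352x32 with the operand pointer and the per-round multiplier limbs both taken from the same input.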
+ .globl mcl_fpDbl_sqrPre11L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre11L,@function
+mcl_fpDbl_sqrPre11L: # @mcl_fpDbl_sqrPre11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $620, %esp # imm = 0x26C
+ calll .L161$pb
+.L161$pb:
+ popl %ebx
+.Ltmp22:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp22-.L161$pb), %ebx
+ movl %ebx, 84(%esp) # 4-byte Spill
+ movl 644(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl %edx, %esi
+ movl %ebx, %edi
+ calll .LmulPv352x32
+ movl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 584(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 580(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 568(%esp), %eax
+ movl 572(%esp), %ebp
+ movl 640(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %esi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 520(%esp), %ecx
+ movl %edi, %ebx
+ calll .LmulPv352x32
+ addl 520(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 540(%esp), %ebx
+ movl 536(%esp), %edi
+ movl 532(%esp), %esi
+ movl 524(%esp), %ecx
+ movl 528(%esp), %edx
+ movl 640(%esp), %eax
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 472(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 516(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 508(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 504(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 492(%esp), %ebp
+ movl 488(%esp), %edi
+ movl 484(%esp), %esi
+ movl 476(%esp), %ecx
+ movl 480(%esp), %edx
+ movl 640(%esp), %eax
+ movl 60(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 48(%esp), %eax # 4-byte Reload
+ addl 424(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 468(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 464(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 460(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 448(%esp), %ebx
+ movl 444(%esp), %edi
+ movl 440(%esp), %esi
+ movl 436(%esp), %edx
+ movl 428(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 432(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 80(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 80(%esp), %eax # 4-byte Reload
+ addl 376(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 404(%esp), %ebx
+ movl 400(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 396(%esp), %edi
+ movl 392(%esp), %esi
+ movl 388(%esp), %edx
+ movl 380(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 384(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 80(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 328(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 348(%esp), %ebp
+ movl 344(%esp), %edi
+ movl 340(%esp), %esi
+ movl 332(%esp), %ecx
+ movl 336(%esp), %edx
+ movl 640(%esp), %eax
+ movl 48(%esp), %ebx # 4-byte Reload
+ movl %ebx, 20(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 48(%esp), %eax # 4-byte Reload
+ addl 280(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 304(%esp), %ebx
+ movl 300(%esp), %edi
+ movl 296(%esp), %esi
+ movl 292(%esp), %edx
+ movl 284(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 288(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 80(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 80(%esp), %eax # 4-byte Reload
+ addl 232(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 260(%esp), %ebx
+ movl 256(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 252(%esp), %edi
+ movl 248(%esp), %esi
+ movl 244(%esp), %edx
+ movl 236(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 240(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 80(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 184(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 204(%esp), %ebp
+ movl 200(%esp), %edi
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 136(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 164(%esp), %ebp
+ movl 160(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 156(%esp), %edi
+ movl 152(%esp), %esi
+ movl 148(%esp), %edx
+ movl 140(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 36(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 88(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 92(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 112(%esp), %edi
+ movl 108(%esp), %esi
+ movl 104(%esp), %edx
+ movl 100(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 40(%eax)
+ movl %ebp, 44(%eax)
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%eax)
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 60(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 64(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ movl %ecx, 76(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ addl $620, %esp # imm = 0x26C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end161:
+ .size mcl_fpDbl_sqrPre11L, .Lfunc_end161-mcl_fpDbl_sqrPre11L
+
+ .globl mcl_fp_mont11L
+ .align 16, 0x90
+ .type mcl_fp_mont11L,@function
+mcl_fp_mont11L: # @mcl_fp_mont11L
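+# Word-serial Montgomery multiplication over 11 x 32-bit limbs (352 bits).
+# Each of the 11 outer passes multiplies one word of the second operand via
+# .LmulPv352x32, derives q by multiplying the running low word with the
+# constant loaded from -4(modulus ptr) (presumably -p^-1 mod 2^32), folds
+# q*p back in with a second .LmulPv352x32 call, and the tail performs a
+# conditional subtraction of p to bring the result into range.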
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1132, %esp # imm = 0x46C
+ calll .L162$pb
+.L162$pb:
+ popl %ebx
+.Ltmp23:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp23-.L162$pb), %ebx
+ movl 1164(%esp), %eax
+ movl -4(%eax), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1080(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 1080(%esp), %edi
+ movl 1084(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %ebp, %eax
+ movl 1124(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1120(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1116(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1112(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1108(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 1100(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 1096(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1092(%esp), %esi
+ movl 1088(%esp), %ebp
+ movl %eax, (%esp)
+ leal 1032(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 1032(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1040(%esp), %ebp
+ adcl 1044(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1076(%esp), %esi
+ sbbl %edi, %edi
+ movl 1160(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 984(%esp), %ecx
+ adcl 988(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1024(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 1028(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 936(%esp), %esi
+ adcl 940(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 964(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 980(%esp), %esi
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ addl 888(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 912(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 928(%esp), %esi
+ movl %esi, %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ebp, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 840(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ movl %esi, %eax
+ andl $1, %eax
+ addl 840(%esp), %ebp
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 844(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 848(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 852(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 856(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 860(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 864(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 868(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 872(%esp), %ebp
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 876(%esp), %esi
+ adcl 880(%esp), %edi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 884(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 792(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 792(%esp), %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 820(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl 824(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 828(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 836(%esp), %esi
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 744(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 744(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 776(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 788(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1156(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 696(%esp), %ecx
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 716(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 724(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 728(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 648(%esp), %ebp
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 676(%esp), %edi
+ adcl 680(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 692(%esp), %esi
+ adcl $0, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 600(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 24(%esp), %ecx # 4-byte Reload
+ addl 600(%esp), %ecx
+ adcl 604(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 608(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 624(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 640(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %esi
+ movl %esi, %eax
+ addl 552(%esp), %edi
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ adcl 560(%esp), %edi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 564(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 568(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 572(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 576(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 580(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 584(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 588(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 592(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 596(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 28(%esp), %ecx # 4-byte Reload
+ addl 504(%esp), %ecx
+ adcl 508(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 520(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 532(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 536(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 456(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 456(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 460(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 464(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 484(%esp), %edi
+ adcl 488(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 496(%esp), %esi
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 408(%esp), %ecx
+ adcl 412(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 428(%esp), %ebp
+ adcl 432(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 444(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 360(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 368(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 380(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 384(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 312(%esp), %ecx
+ adcl 316(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 332(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 340(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 348(%esp), %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 264(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 276(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 284(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 300(%esp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 304(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 216(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 224(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 232(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 252(%esp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ movl %esi, %ecx
+ andl $1, %ecx
+ addl 168(%esp), %ebp
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 172(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 184(%esp), %ebp
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 188(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl %esi, %ecx
+ addl 120(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 136(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 156(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 20(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ andl $1, %esi
+ addl 72(%esp), %edi
+ movl 48(%esp), %edi # 4-byte Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 88(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl 1164(%esp), %ebp
+ subl (%ebp), %eax
+ movl %ecx, %edx
+ sbbl 4(%ebp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ sbbl 12(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 28(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ebp), %ebx
+ movl 32(%esp), %edi # 4-byte Reload
+ sbbl 32(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ sbbl 36(%ebp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 40(%ebp), %edi
+ movl %edi, %ebp
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB162_2
+# BB#1:
+ movl %ebx, 28(%esp) # 4-byte Spill
+.LBB162_2:
+ movl %esi, %ebx
+ testb %bl, %bl
+ movl 68(%esp), %ebx # 4-byte Reload
+ jne .LBB162_4
+# BB#3:
+ movl %eax, %ebx
+.LBB162_4:
+ movl 1152(%esp), %eax
+ movl %ebx, (%eax)
+ movl 56(%esp), %edi # 4-byte Reload
+ jne .LBB162_6
+# BB#5:
+ movl %edx, %edi
+.LBB162_6:
+ movl %edi, 4(%eax)
+ movl 52(%esp), %edx # 4-byte Reload
+ jne .LBB162_8
+# BB#7:
+ movl %ecx, %edx
+.LBB162_8:
+ movl %edx, 8(%eax)
+ jne .LBB162_10
+# BB#9:
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+.LBB162_10:
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB162_12
+# BB#11:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB162_12:
+ movl %ecx, 16(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB162_14
+# BB#13:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB162_14:
+ movl %ecx, 20(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ jne .LBB162_16
+# BB#15:
+ movl 16(%esp), %ecx # 4-byte Reload
+.LBB162_16:
+ movl %ecx, 24(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ jne .LBB162_18
+# BB#17:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB162_18:
+ movl %ecx, 32(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB162_20
+# BB#19:
+ movl 60(%esp), %ecx # 4-byte Reload
+.LBB162_20:
+ movl %ecx, 36(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB162_22
+# BB#21:
+ movl %ebp, %ecx
+.LBB162_22:
+ movl %ecx, 40(%eax)
+ addl $1132, %esp # imm = 0x46C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end162:
+ .size mcl_fp_mont11L, .Lfunc_end162-mcl_fp_mont11L
+
+ .globl mcl_fp_montNF11L
+ .align 16, 0x90
+ .type mcl_fp_montNF11L,@function
+mcl_fp_montNF11L: # @mcl_fp_montNF11L
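+# "NF" variant of the 352-bit Montgomery multiplication. It follows the same
+# word-serial .LmulPv352x32 structure as mcl_fp_mont11L, but carries the top
+# product word forward directly (adcl $0, ...) instead of materialising a
+# carry mask with sbbl/andl $1, presumably relying on the modulus leaving
+# enough headroom in its top limb for the intermediate sums not to overflow.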
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1132, %esp # imm = 0x46C
+ calll .L163$pb
+.L163$pb:
+ popl %ebx
+.Ltmp24:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp24-.L163$pb), %ebx
+ movl 1164(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1080(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 1080(%esp), %ebp
+ movl 1084(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 1124(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1120(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1116(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1112(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1108(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1100(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 1096(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1092(%esp), %esi
+ movl 1088(%esp), %edi
+ movl %eax, (%esp)
+ leal 1032(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 1032(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1040(%esp), %edi
+ adcl 1044(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 1048(%esp), %ebp
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 1052(%esp), %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 1028(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 984(%esp), %ecx
+ adcl 988(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 996(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 1000(%esp), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 1004(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ addl 936(%esp), %ebp
+ adcl 940(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 956(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 960(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 980(%esp), %ebp
+ movl 1160(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 932(%esp), %eax
+ addl 888(%esp), %edi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 892(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 896(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 900(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 904(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 908(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 912(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 916(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 920(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 924(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 928(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %edi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 840(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 840(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 860(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 872(%esp), %edi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 876(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 884(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 792(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 836(%esp), %eax
+ movl 44(%esp), %edx # 4-byte Reload
+ addl 792(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 796(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 800(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 804(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 808(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 812(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 816(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 820(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 824(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 828(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 832(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 744(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 744(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 764(%esp), %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 768(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 780(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 784(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 740(%esp), %edx
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 704(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 708(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 712(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 716(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 720(%esp), %edi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 724(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 728(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 732(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 736(%esp), %esi
+ adcl $0, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 648(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 672(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 688(%esp), %esi
+ movl %esi, %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 692(%esp), %esi
+ movl 1160(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 600(%esp), %ecx
+ movl 1156(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ movl 644(%esp), %eax
+ movl 28(%esp), %ecx # 4-byte Reload
+ addl 600(%esp), %ecx
+ adcl 604(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 608(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 612(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 616(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 620(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 624(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 628(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 632(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 636(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 640(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 552(%esp), %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 560(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 576(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 596(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 1160(%esp), %ecx
+ movl %ecx, %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 548(%esp), %edx
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 504(%esp), %eax
+ adcl 508(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 512(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 516(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 520(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 524(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 528(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 532(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 536(%esp), %edi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 540(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 544(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 456(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 456(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 468(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 480(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 488(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 496(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 452(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 408(%esp), %ecx
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 412(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 428(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 444(%esp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 448(%esp), %edi
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 360(%esp), %esi
+ adcl 364(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 372(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 400(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 356(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 312(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 320(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 332(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 340(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 264(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 276(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 284(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 292(%esp), %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 260(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 216(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 224(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 232(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 240(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 244(%esp), %ebp
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 168(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 176(%esp), %esi
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 180(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 196(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 204(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 164(%esp), %edx
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 120(%esp), %ecx
+ adcl 124(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 128(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 136(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 152(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 156(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 72(%esp), %edi
+ movl 48(%esp), %edi # 4-byte Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 84(%esp), %edi
+ adcl 88(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1164(%esp), %ebx
+ subl (%ebx), %edx
+ movl %ecx, %esi
+ sbbl 4(%ebx), %esi
+ movl %edi, %ecx
+ sbbl 8(%ebx), %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 12(%ebx), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ sbbl 16(%ebx), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ sbbl 20(%ebx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ sbbl 24(%ebx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ sbbl 28(%ebx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ sbbl 32(%ebx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ sbbl 36(%ebx), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ sbbl 40(%ebx), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ movl 68(%esp), %ebx # 4-byte Reload
+ js .LBB163_2
+# BB#1:
+ movl %edx, %ebx
+.LBB163_2:
+ movl 1152(%esp), %edx
+ movl %ebx, (%edx)
+ movl 60(%esp), %ebp # 4-byte Reload
+ js .LBB163_4
+# BB#3:
+ movl %esi, %ebp
+.LBB163_4:
+ movl %ebp, 4(%edx)
+ js .LBB163_6
+# BB#5:
+ movl %ecx, %edi
+.LBB163_6:
+ movl %edi, 8(%edx)
+ movl 44(%esp), %ecx # 4-byte Reload
+ js .LBB163_8
+# BB#7:
+ movl %eax, %ecx
+.LBB163_8:
+ movl %ecx, 12(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB163_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB163_10:
+ movl %eax, 16(%edx)
+ movl 28(%esp), %eax # 4-byte Reload
+ js .LBB163_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB163_12:
+ movl %eax, 20(%edx)
+ movl 32(%esp), %eax # 4-byte Reload
+ js .LBB163_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB163_14:
+ movl %eax, 24(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB163_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB163_16:
+ movl %eax, 28(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB163_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB163_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB163_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB163_20:
+ movl %eax, 36(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB163_22
+# BB#21:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB163_22:
+ movl %eax, 40(%edx)
+ addl $1132, %esp # imm = 0x46C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end163:
+ .size mcl_fp_montNF11L, .Lfunc_end163-mcl_fp_montNF11L
+
+ .globl mcl_fp_montRed11L
+ .align 16, 0x90
+ .type mcl_fp_montRed11L,@function
+mcl_fp_montRed11L: # @mcl_fp_montRed11L
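+# Montgomery reduction (REDC) of a double-width (22 x 32-bit limb) input modulo the 11-limb
+# modulus: one pass per limb multiplies by the precomputed -p^-1 via .LmulPv352x32 and folds
+# the limb away, followed by a final conditional subtraction of the modulus.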
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $668, %esp # imm = 0x29C
+ calll .L164$pb
+.L164$pb:
+ popl %eax
+.Ltmp25:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp25-.L164$pb), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 696(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 692(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 4(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 84(%ecx), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 80(%ecx), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 48(%ecx), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 40(%ecx), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 32(%ecx), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 28(%ecx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 24(%ecx), %ebp
+ movl 20(%ecx), %edi
+ movl 16(%ecx), %esi
+ movl 12(%ecx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 8(%ecx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 616(%esp), %ecx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 616(%esp), %eax
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 620(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 632(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 636(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 640(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 568(%esp), %esi
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 572(%esp), %edx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 600(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 520(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 520(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 524(%esp), %ecx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 548(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 120(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 472(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 424(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 428(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 464(%esp), %ebp
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 376(%esp), %esi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 380(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %esi # 4-byte Reload
+ adcl 404(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 412(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 328(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 344(%esp), %edi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 352(%esp), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 356(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 696(%esp), %eax
+ movl %eax, %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 280(%esp), %ebp
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 284(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 296(%esp), %edi
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl 304(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 232(%esp), %ebp
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 236(%esp), %ebp
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 244(%esp), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 276(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 184(%esp), %ebp
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 188(%esp), %ecx
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 136(%esp), %esi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 128(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 148(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 152(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ adcl 180(%esp), %ebx
+ movl %ebx, 80(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ subl 12(%esp), %edi # 4-byte Folded Reload
+ sbbl 4(%esp), %edx # 4-byte Folded Reload
+ sbbl 8(%esp), %ecx # 4-byte Folded Reload
+ sbbl 16(%esp), %esi # 4-byte Folded Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ sbbl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ sbbl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl %ebp, %ebx
+ sbbl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB164_2
+# BB#1:
+ movl %esi, 112(%esp) # 4-byte Spill
+.LBB164_2:
+ testb %bl, %bl
+ movl 132(%esp), %esi # 4-byte Reload
+ jne .LBB164_4
+# BB#3:
+ movl %edi, %esi
+.LBB164_4:
+ movl 688(%esp), %edi
+ movl %esi, (%edi)
+ movl 104(%esp), %esi # 4-byte Reload
+ jne .LBB164_6
+# BB#5:
+ movl %edx, 128(%esp) # 4-byte Spill
+.LBB164_6:
+ movl 128(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%edi)
+ movl 116(%esp), %edx # 4-byte Reload
+ jne .LBB164_8
+# BB#7:
+ movl %ecx, %edx
+.LBB164_8:
+ movl %edx, 8(%edi)
+ movl 112(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%edi)
+ movl 92(%esp), %edx # 4-byte Reload
+ movl 124(%esp), %ecx # 4-byte Reload
+ jne .LBB164_10
+# BB#9:
+ movl 64(%esp), %ecx # 4-byte Reload
+.LBB164_10:
+ movl %ecx, 16(%edi)
+ movl 96(%esp), %ecx # 4-byte Reload
+ movl 120(%esp), %eax # 4-byte Reload
+ jne .LBB164_12
+# BB#11:
+ movl 68(%esp), %eax # 4-byte Reload
+.LBB164_12:
+ movl %eax, 20(%edi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl 108(%esp), %ebp # 4-byte Reload
+ jne .LBB164_14
+# BB#13:
+ movl 72(%esp), %ebp # 4-byte Reload
+.LBB164_14:
+ movl %ebp, 24(%edi)
+ jne .LBB164_16
+# BB#15:
+ movl 76(%esp), %esi # 4-byte Reload
+.LBB164_16:
+ movl %esi, 28(%edi)
+ jne .LBB164_18
+# BB#17:
+ movl 84(%esp), %edx # 4-byte Reload
+.LBB164_18:
+ movl %edx, 32(%edi)
+ jne .LBB164_20
+# BB#19:
+ movl 88(%esp), %ecx # 4-byte Reload
+.LBB164_20:
+ movl %ecx, 36(%edi)
+ jne .LBB164_22
+# BB#21:
+ movl 100(%esp), %eax # 4-byte Reload
+.LBB164_22:
+ movl %eax, 40(%edi)
+ addl $668, %esp # imm = 0x29C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end164:
+ .size mcl_fp_montRed11L, .Lfunc_end164-mcl_fp_montRed11L
+
+ .globl mcl_fp_addPre11L
+ .align 16, 0x90
+ .type mcl_fp_addPre11L,@function
+mcl_fp_addPre11L: # @mcl_fp_addPre11L
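+# Raw 11-limb (352-bit) addition: adds the two source operands limb by limb,
+# stores the sum, and returns the final carry in %eax; no modular reduction.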
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl %esi, 32(%ebx)
+ movl %edx, 36(%ebx)
+ movl 40(%eax), %eax
+ movl 40(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 40(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end165:
+ .size mcl_fp_addPre11L, .Lfunc_end165-mcl_fp_addPre11L
+
+ .globl mcl_fp_subPre11L
+ .align 16, 0x90
+ .type mcl_fp_subPre11L,@function
+mcl_fp_subPre11L: # @mcl_fp_subPre11L
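+# Raw 11-limb subtraction: subtracts limb by limb, stores the difference,
+# and returns the final borrow in %eax; no modular reduction.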
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edi, 32(%ebp)
+ movl %esi, 36(%ebp)
+ movl 40(%edx), %edx
+ movl 40(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 40(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end166:
+ .size mcl_fp_subPre11L, .Lfunc_end166-mcl_fp_subPre11L
+
+ .globl mcl_fp_shr1_11L
+ .align 16, 0x90
+ .type mcl_fp_shr1_11L,@function
+mcl_fp_shr1_11L: # @mcl_fp_shr1_11L
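+# Right shift of an 11-limb value by one bit, propagating bits across limb
+# boundaries with shrdl; the top limb is finished with a plain shrl.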
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 24(%esi)
+ movl 32(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 28(%esi)
+ movl 36(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 32(%esi)
+ movl 40(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 36(%esi)
+ shrl %eax
+ movl %eax, 40(%esi)
+ popl %esi
+ retl
+.Lfunc_end167:
+ .size mcl_fp_shr1_11L, .Lfunc_end167-mcl_fp_shr1_11L
+
+ .globl mcl_fp_add11L
+ .align 16, 0x90
+ .type mcl_fp_add11L,@function
+mcl_fp_add11L: # @mcl_fp_add11L
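+# Modular addition: adds the operands, then subtracts the modulus and keeps the
+# reduced value unless that subtraction borrows (carry path leaves the raw sum).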
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 60(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 56(%esp), %esi
+ addl (%esi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl 4(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl 16(%esi), %ecx
+ adcl 12(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl 16(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ adcl 20(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ adcl 24(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 28(%esi), %ebx
+ adcl 28(%edi), %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl 32(%esi), %ecx
+ adcl 32(%edi), %ecx
+ movl 36(%esi), %eax
+ adcl 36(%edi), %eax
+ movl 40(%esi), %edx
+ adcl 40(%edi), %edx
+ movl 52(%esp), %esi
+ movl %ebp, (%esi)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%esi)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%esi)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%esi)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%esi)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%esi)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%esi)
+ movl %ebx, 28(%esi)
+ movl %ecx, 32(%esi)
+ movl %eax, 36(%esi)
+ movl %edx, 40(%esi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 64(%esp), %ebp
+ movl 4(%esp), %edi # 4-byte Reload
+ subl (%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl (%esp), %edi # 4-byte Reload
+ sbbl 28(%ebp), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 32(%ebp), %ecx
+ sbbl 36(%ebp), %eax
+ sbbl 40(%ebp), %edx
+ movl %edx, %edi
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB168_2
+# BB#1: # %nocarry
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, (%esi)
+ movl 28(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%esi)
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%esi)
+ movl 20(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%esi)
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%esi)
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 20(%esi)
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%esi)
+ movl (%esp), %edx # 4-byte Reload
+ movl %edx, 28(%esi)
+ movl %ecx, 32(%esi)
+ movl %eax, 36(%esi)
+ movl %edi, 40(%esi)
+.LBB168_2: # %carry
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end168:
+ .size mcl_fp_add11L, .Lfunc_end168-mcl_fp_add11L
+
+ .globl mcl_fp_addNF11L
+ .align 16, 0x90
+ .type mcl_fp_addNF11L,@function
+mcl_fp_addNF11L: # @mcl_fp_addNF11L
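+# Modular addition, no-final-carry variant: computes the sum and the sum minus the
+# modulus, then selects between them by the sign of the difference instead of the carry flag.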
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $80, %esp
+ movl 108(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %ecx
+ movl 104(%esp), %esi
+ addl (%esi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 4(%esi), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%edx), %ebx
+ movl 36(%edx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 20(%edx), %ebp
+ movl 16(%edx), %edi
+ movl 12(%edx), %eax
+ movl 8(%edx), %ecx
+ adcl 8(%esi), %ecx
+ adcl 12(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 24(%esi), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 28(%esi), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 32(%esi), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 36(%esi), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ adcl 40(%esi), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 112(%esp), %ebx
+ movl 52(%esp), %esi # 4-byte Reload
+ subl (%ebx), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebx), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl %edx, %ecx
+ sbbl 8(%ebx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ sbbl 12(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ sbbl 20(%ebx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 24(%ebx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ movl %edi, %ecx
+ movl %edi, %ebp
+ sbbl 36(%ebx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edi
+ sbbl 40(%ebx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ movl 52(%esp), %edi # 4-byte Reload
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ js .LBB169_2
+# BB#1:
+ movl %esi, %edi
+.LBB169_2:
+ movl 100(%esp), %esi
+ movl %edi, (%esi)
+ movl 60(%esp), %edi # 4-byte Reload
+ js .LBB169_4
+# BB#3:
+ movl (%esp), %edi # 4-byte Reload
+.LBB169_4:
+ movl %edi, 4(%esi)
+ movl %eax, %edi
+ js .LBB169_6
+# BB#5:
+ movl 4(%esp), %edx # 4-byte Reload
+.LBB169_6:
+ movl %edx, 8(%esi)
+ movl %ebp, %ecx
+ movl 72(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB169_8
+# BB#7:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB169_8:
+ movl %eax, 12(%esi)
+ movl 76(%esp), %eax # 4-byte Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ js .LBB169_10
+# BB#9:
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+.LBB169_10:
+ movl 48(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%esi)
+ js .LBB169_12
+# BB#11:
+ movl 16(%esp), %ebp # 4-byte Reload
+.LBB169_12:
+ movl %ebp, 20(%esi)
+ js .LBB169_14
+# BB#13:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB169_14:
+ movl %edi, 24(%esi)
+ js .LBB169_16
+# BB#15:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB169_16:
+ movl %eax, 28(%esi)
+ js .LBB169_18
+# BB#17:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB169_18:
+ movl %edx, 32(%esi)
+ js .LBB169_20
+# BB#19:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB169_20:
+ movl %ecx, 36(%esi)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB169_22
+# BB#21:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB169_22:
+ movl %eax, 40(%esi)
+ addl $80, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end169:
+ .size mcl_fp_addNF11L, .Lfunc_end169-mcl_fp_addNF11L
+
+ .globl mcl_fp_sub11L
+ .align 16, 0x90
+ .type mcl_fp_sub11L,@function
+mcl_fp_sub11L: # @mcl_fp_sub11L
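+# Modular subtraction: subtracts the operands and, if a borrow occurred,
+# adds the modulus back into the stored result.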
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %ebp
+ movl (%ebp), %ecx
+ movl 4(%ebp), %eax
+ movl 68(%esp), %edi
+ subl (%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 8(%ebp), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%ebp), %ebx
+ sbbl 12(%edi), %ebx
+ movl 16(%ebp), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 20(%ebp), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 24(%ebp), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 28(%ebp), %edx
+ sbbl 28(%edi), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 32(%ebp), %ecx
+ sbbl 32(%edi), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 36(%ebp), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 40(%ebp), %eax
+ sbbl 40(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ movl 16(%esp), %esi # 4-byte Reload
+ movl $0, %ebx
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 60(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl %ebp, 12(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl %edx, 28(%ebx)
+ movl %ecx, 32(%ebx)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%ebx)
+ movl %ecx, %edi
+ movl %eax, 40(%ebx)
+ je .LBB170_2
+# BB#1: # %carry
+ movl 72(%esp), %eax
+ addl (%eax), %esi
+ movl %esi, (%ebx)
+ movl 28(%esp), %edx # 4-byte Reload
+ movl %eax, %esi
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl %ebp, %eax
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl %ecx, 32(%ebx)
+ movl 36(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 36(%ebx)
+ movl 40(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ebx)
+.LBB170_2: # %nocarry
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end170:
+ .size mcl_fp_sub11L, .Lfunc_end170-mcl_fp_sub11L
+
+ .globl mcl_fp_subNF11L
+ .align 16, 0x90
+ .type mcl_fp_subNF11L,@function
+mcl_fp_subNF11L: # @mcl_fp_subNF11L
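+# Modular subtraction, branch-free variant: subtracts, then adds back the modulus
+# masked by the sign of the result (mask is all-ones only when the difference went negative).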
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+ movl 88(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 92(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%eax), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 36(%eax), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 28(%eax), %ebx
+ movl 24(%eax), %ebp
+ movl 20(%eax), %esi
+ movl 16(%eax), %edx
+ movl 12(%eax), %ecx
+ movl 8(%eax), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ sbbl 12(%edi), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ sbbl 40(%edi), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sarl $31, %esi
+ movl %esi, %eax
+ shldl $1, %edx, %eax
+ movl 96(%esp), %edx
+ movl 4(%edx), %ecx
+ andl %eax, %ecx
+ movl %ecx, %ebx
+ andl (%edx), %eax
+ movl 40(%edx), %ecx
+ andl %esi, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ andl %esi, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ andl %esi, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ andl %esi, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ andl %esi, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 20(%edx), %ebp
+ andl %esi, %ebp
+ roll %esi
+ movl 16(%edx), %edi
+ andl %esi, %edi
+ movl 12(%edx), %ecx
+ andl %esi, %ecx
+ andl 8(%edx), %esi
+ addl 44(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, %edx
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl 84(%esp), %ebx
+ movl %eax, (%ebx)
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 4(%ebx)
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %esi, 8(%ebx)
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %ecx, 12(%ebx)
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 16(%ebx)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 20(%ebx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl %eax, 36(%ebx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ebx)
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end171:
+ .size mcl_fp_subNF11L, .Lfunc_end171-mcl_fp_subNF11L
+
+ .globl mcl_fpDbl_add11L
+ .align 16, 0x90
+ .type mcl_fpDbl_add11L,@function
+mcl_fpDbl_add11L: # @mcl_fpDbl_add11L
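+# Double-width addition: adds two 22-limb values; the low 11 limbs are written out
+# directly and the high half is conditionally reduced by the modulus.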
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $80, %esp
+ movl 108(%esp), %ecx
+ movl 104(%esp), %edi
+ movl 12(%edi), %esi
+ movl 16(%edi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%edi), %ebp
+ movl 100(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%ecx), %esi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 52(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%edi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%edi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebx, %esi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%edi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %esi, 36(%eax)
+ movl 44(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 48(%ecx), %esi
+ movl %edx, 40(%eax)
+ movl 48(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 52(%edi), %eax
+ adcl %ebp, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 56(%ecx), %edx
+ movl 56(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%ecx), %edx
+ movl 60(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%ecx), %edx
+ movl 64(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl 68(%edi), %edx
+ adcl %eax, %edx
+ movl 72(%ecx), %esi
+ movl 72(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 76(%ecx), %ebx
+ movl 76(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 80(%ecx), %ebp
+ movl 80(%edi), %ebx
+ adcl %ebp, %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 84(%ecx), %ecx
+ movl 84(%edi), %edi
+ adcl %ecx, %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 112(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ subl (%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %edx, %edi
+ sbbl 24(%ebp), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 28(%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl 32(%ebp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ sbbl 36(%ebp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ sbbl 40(%ebp), %edi
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB172_2
+# BB#1:
+ movl %edi, %ebx
+.LBB172_2:
+ testb %cl, %cl
+ movl 68(%esp), %ecx # 4-byte Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ movl 60(%esp), %edi # 4-byte Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ jne .LBB172_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB172_4:
+ movl 100(%esp), %eax
+ movl %ecx, 44(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl %ebp, 56(%eax)
+ movl %edi, 60(%eax)
+ movl %esi, 64(%eax)
+ movl %edx, 68(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ jne .LBB172_6
+# BB#5:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB172_6:
+ movl %edx, 72(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ jne .LBB172_8
+# BB#7:
+ movl 32(%esp), %edx # 4-byte Reload
+.LBB172_8:
+ movl %edx, 76(%eax)
+ jne .LBB172_10
+# BB#9:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB172_10:
+ movl %ecx, 80(%eax)
+ movl %ebx, 84(%eax)
+ addl $80, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end172:
+ .size mcl_fpDbl_add11L, .Lfunc_end172-mcl_fpDbl_add11L
+
+ .globl mcl_fpDbl_sub11L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub11L,@function
+mcl_fpDbl_sub11L: # @mcl_fpDbl_sub11L
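+# Double-width subtraction: subtracts two 22-limb values; the low 11 limbs are written
+# out directly and, if the subtraction borrowed, the modulus is added back into the high half.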
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 96(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %esi
+ movl 100(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %esi
+ movl 8(%edx), %edi
+ sbbl 8(%ebp), %edi
+ movl 92(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edx), %eax
+ sbbl 12(%ebp), %eax
+ movl %esi, 4(%ecx)
+ movl 16(%edx), %esi
+ sbbl 16(%ebp), %esi
+ movl %edi, 8(%ecx)
+ movl 20(%ebp), %edi
+ movl %eax, 12(%ecx)
+ movl 20(%edx), %eax
+ sbbl %edi, %eax
+ movl 24(%ebp), %edi
+ movl %esi, 16(%ecx)
+ movl 24(%edx), %esi
+ sbbl %edi, %esi
+ movl 28(%ebp), %edi
+ movl %eax, 20(%ecx)
+ movl 28(%edx), %eax
+ sbbl %edi, %eax
+ movl 32(%ebp), %edi
+ movl %esi, 24(%ecx)
+ movl 32(%edx), %esi
+ sbbl %edi, %esi
+ movl 36(%ebp), %edi
+ movl %eax, 28(%ecx)
+ movl 36(%edx), %eax
+ sbbl %edi, %eax
+ movl 40(%ebp), %edi
+ movl %esi, 32(%ecx)
+ movl 40(%edx), %esi
+ sbbl %edi, %esi
+ movl 44(%ebp), %edi
+ movl %eax, 36(%ecx)
+ movl 44(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%ebp), %eax
+ movl %esi, 40(%ecx)
+ movl 48(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 52(%ebp), %eax
+ movl 52(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 56(%ebp), %eax
+ movl 56(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 60(%ebp), %eax
+ movl 60(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 64(%ebp), %eax
+ movl 64(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 68(%ebp), %eax
+ movl 68(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 72(%ebp), %eax
+ movl 72(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 76(%ebp), %eax
+ movl 76(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 80(%ebp), %eax
+ movl 80(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 84(%ebp), %eax
+ movl 84(%edx), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 104(%esp), %ebp
+ jne .LBB173_1
+# BB#2:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ jmp .LBB173_3
+.LBB173_1:
+ movl 40(%ebp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+.LBB173_3:
+ testb %al, %al
+ jne .LBB173_4
+# BB#5:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB173_6
+.LBB173_4:
+ movl (%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 4(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB173_6:
+ jne .LBB173_7
+# BB#8:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB173_9
+.LBB173_7:
+ movl 36(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB173_9:
+ jne .LBB173_10
+# BB#11:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB173_12
+.LBB173_10:
+ movl 32(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB173_12:
+ jne .LBB173_13
+# BB#14:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB173_15
+.LBB173_13:
+ movl 28(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB173_15:
+ jne .LBB173_16
+# BB#17:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB173_18
+.LBB173_16:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB173_18:
+ jne .LBB173_19
+# BB#20:
+ movl $0, %edx
+ jmp .LBB173_21
+.LBB173_19:
+ movl 20(%ebp), %edx
+.LBB173_21:
+ jne .LBB173_22
+# BB#23:
+ movl $0, %edi
+ jmp .LBB173_24
+.LBB173_22:
+ movl 16(%ebp), %edi
+.LBB173_24:
+ jne .LBB173_25
+# BB#26:
+ movl $0, %ebx
+ jmp .LBB173_27
+.LBB173_25:
+ movl 12(%ebp), %ebx
+.LBB173_27:
+ jne .LBB173_28
+# BB#29:
+ xorl %ebp, %ebp
+ jmp .LBB173_30
+.LBB173_28:
+ movl 8(%ebp), %ebp
+.LBB173_30:
+ movl 8(%esp), %esi # 4-byte Reload
+ addl 36(%esp), %esi # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 44(%ecx)
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 52(%ecx)
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 56(%ecx)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 60(%ecx)
+ movl (%esp), %esi # 4-byte Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 64(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 68(%ecx)
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 76(%ecx)
+ movl %eax, 80(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%ecx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end173:
+ .size mcl_fpDbl_sub11L, .Lfunc_end173-mcl_fpDbl_sub11L
+
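+# .LmulPv384x32: local helper that multiplies a 12-limb (384-bit) value by a
+# single 32-bit word. It uses a custom calling convention: %edx points at the
+# 12 input limbs, %ecx points at the 13-limb output buffer, the 32-bit
+# multiplier is passed in the caller's stack slot, and the output pointer is
+# returned in %eax.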
+ .align 16, 0x90
+ .type .LmulPv384x32,@function
+.LmulPv384x32: # @mulPv384x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $80, %esp
+ movl %edx, %ebx
+ movl 100(%esp), %ebp
+ movl %ebp, %eax
+ mull 44(%ebx)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 40(%ebx)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 36(%ebx)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 32(%ebx)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 28(%ebx)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 24(%ebx)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 20(%ebx)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 16(%ebx)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 12(%ebx)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 8(%ebx)
+ movl %edx, %edi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 4(%ebx)
+ movl %edx, %esi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull (%ebx)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 8(%ecx)
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 48(%ecx)
+ movl %ecx, %eax
+ addl $80, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end174:
+ .size .LmulPv384x32, .Lfunc_end174-.LmulPv384x32
+
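+# mcl_fp_mulUnitPre12L: multiplies a 12-limb operand by one 32-bit word by
+# delegating to .LmulPv384x32 and copying the 13-limb product to the output.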
+ .globl mcl_fp_mulUnitPre12L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre12L,@function
+mcl_fp_mulUnitPre12L: # @mcl_fp_mulUnitPre12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+ calll .L175$pb
+.L175$pb:
+ popl %ebx
+.Ltmp26:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp26-.L175$pb), %ebx
+ movl 120(%esp), %eax
+ movl %eax, (%esp)
+ leal 40(%esp), %ecx
+ movl 116(%esp), %edx
+ calll .LmulPv384x32
+ movl 88(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp
+ movl 56(%esp), %ebx
+ movl 52(%esp), %edi
+ movl 48(%esp), %esi
+ movl 40(%esp), %edx
+ movl 44(%esp), %ecx
+ movl 112(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end175:
+ .size mcl_fp_mulUnitPre12L, .Lfunc_end175-mcl_fp_mulUnitPre12L
+
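+# mcl_fpDbl_mulPre12L: 12x12-limb multiplication producing a 24-limb result.
+# It appears to use one level of a Karatsuba-style split: three calls to
+# mcl_fpDbl_mulPre6L (low halves, high halves, and the sums of the halves),
+# followed by combination of the cross term with carry/borrow fix-ups.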
+ .globl mcl_fpDbl_mulPre12L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre12L,@function
+mcl_fpDbl_mulPre12L: # @mcl_fpDbl_mulPre12L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $220, %esp
+ calll .L176$pb
+.L176$pb:
+ popl %ebx
+.Ltmp27:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp27-.L176$pb), %ebx
+ movl %ebx, -164(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl %esi, 8(%esp)
+ movl 12(%ebp), %edi
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6L@PLT
+ leal 24(%esi), %eax
+ movl %eax, 8(%esp)
+ leal 24(%edi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 48(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6L@PLT
+ movl 40(%edi), %ebx
+ movl 36(%edi), %eax
+ movl 32(%edi), %edx
+ movl (%edi), %esi
+ movl 4(%edi), %ecx
+ addl 24(%edi), %esi
+ adcl 28(%edi), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ adcl 8(%edi), %edx
+ movl %edx, -188(%ebp) # 4-byte Spill
+ adcl 12(%edi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ adcl 16(%edi), %ebx
+ movl %ebx, -180(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ movl 16(%ebp), %edi
+ movl (%edi), %eax
+ addl 24(%edi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ movl 4(%edi), %eax
+ adcl 28(%edi), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ movl 32(%edi), %eax
+ adcl 8(%edi), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ movl 36(%edi), %eax
+ adcl 12(%edi), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl 40(%edi), %ecx
+ adcl 16(%edi), %ecx
+ movl 44(%edi), %eax
+ adcl 20(%edi), %eax
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -184(%ebp) # 4-byte Spill
+ movl %ebx, %edi
+ movl %edx, -156(%ebp) # 4-byte Spill
+ movl %esi, -160(%ebp) # 4-byte Spill
+ movl %esi, %edx
+ jb .LBB176_2
+# BB#1:
+ xorl %edi, %edi
+ movl $0, -156(%ebp) # 4-byte Folded Spill
+ movl $0, -160(%ebp) # 4-byte Folded Spill
+.LBB176_2:
+ movl %edi, -176(%ebp) # 4-byte Spill
+ movl 12(%ebp), %esi
+ movl 44(%esi), %edi
+ movl -112(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 20(%esi), %edi
+ movl %edi, -132(%ebp) # 4-byte Spill
+ movl %eax, -124(%ebp) # 4-byte Spill
+ movl %ecx, -112(%ebp) # 4-byte Spill
+ movl -148(%ebp), %esi # 4-byte Reload
+ movl %esi, -116(%ebp) # 4-byte Spill
+ movl -144(%ebp), %esi # 4-byte Reload
+ movl %esi, -120(%ebp) # 4-byte Spill
+ movl -140(%ebp), %esi # 4-byte Reload
+ movl %esi, -128(%ebp) # 4-byte Spill
+ movl -136(%ebp), %esi # 4-byte Reload
+ movl %esi, -152(%ebp) # 4-byte Spill
+ jb .LBB176_4
+# BB#3:
+ movl $0, -124(%ebp) # 4-byte Folded Spill
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+ movl $0, -116(%ebp) # 4-byte Folded Spill
+ movl $0, -120(%ebp) # 4-byte Folded Spill
+ movl $0, -128(%ebp) # 4-byte Folded Spill
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+.LBB176_4:
+ movl %edx, -84(%ebp)
+ movl -172(%ebp), %esi # 4-byte Reload
+ movl %esi, -80(%ebp)
+ movl -188(%ebp), %edx # 4-byte Reload
+ movl %edx, -76(%ebp)
+ movl -168(%ebp), %edi # 4-byte Reload
+ movl %edi, -72(%ebp)
+ movl -180(%ebp), %edx # 4-byte Reload
+ movl %edx, -68(%ebp)
+ movl -136(%ebp), %edx # 4-byte Reload
+ movl %edx, -108(%ebp)
+ movl -140(%ebp), %edx # 4-byte Reload
+ movl %edx, -104(%ebp)
+ movl -144(%ebp), %edx # 4-byte Reload
+ movl %edx, -100(%ebp)
+ movl -148(%ebp), %edx # 4-byte Reload
+ movl %edx, -96(%ebp)
+ movl %ecx, -92(%ebp)
+ movl %eax, -88(%ebp)
+ movl %edi, %ebx
+ sbbl %edx, %edx
+ movl -132(%ebp), %eax # 4-byte Reload
+ movl %eax, -64(%ebp)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB176_6
+# BB#5:
+ movl $0, %eax
+ movl $0, %ebx
+ movl $0, %esi
+.LBB176_6:
+ movl %eax, -132(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+ leal -108(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -84(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -60(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl -152(%ebp), %edi # 4-byte Reload
+ addl -160(%ebp), %edi # 4-byte Folded Reload
+ adcl %esi, -128(%ebp) # 4-byte Folded Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl %eax, -120(%ebp) # 4-byte Folded Spill
+ adcl %ebx, -116(%ebp) # 4-byte Folded Spill
+ movl -176(%ebp), %eax # 4-byte Reload
+ adcl %eax, -112(%ebp) # 4-byte Folded Spill
+ movl -132(%ebp), %eax # 4-byte Reload
+ adcl %eax, -124(%ebp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -132(%ebp) # 4-byte Spill
+ movl -164(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre6L@PLT
+ addl -36(%ebp), %edi
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -32(%ebp), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -112(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+ adcl %esi, -132(%ebp) # 4-byte Folded Spill
+ movl -60(%ebp), %ecx
+ movl 8(%ebp), %eax
+ subl (%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -56(%ebp), %esi
+ sbbl 4(%eax), %esi
+ movl -52(%ebp), %ecx
+ sbbl 8(%eax), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ movl -48(%ebp), %edx
+ sbbl 12(%eax), %edx
+ movl -44(%ebp), %ebx
+ sbbl 16(%eax), %ebx
+ movl -40(%ebp), %ecx
+ sbbl 20(%eax), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 28(%eax), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 32(%eax), %ecx
+ movl %ecx, -156(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 36(%eax), %ecx
+ movl %ecx, -160(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 40(%eax), %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 44(%eax), %ecx
+ movl %ecx, -168(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, -132(%ebp) # 4-byte Folded Spill
+ movl 48(%eax), %ecx
+ movl %ecx, -192(%ebp) # 4-byte Spill
+ subl %ecx, -144(%ebp) # 4-byte Folded Spill
+ movl 52(%eax), %ecx
+ movl %ecx, -196(%ebp) # 4-byte Spill
+ sbbl %ecx, %esi
+ movl 56(%eax), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ sbbl %ecx, -136(%ebp) # 4-byte Folded Spill
+ movl 60(%eax), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 64(%eax), %ecx
+ movl %ecx, -208(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 68(%eax), %ecx
+ movl %ecx, -212(%ebp) # 4-byte Spill
+ sbbl %ecx, -140(%ebp) # 4-byte Folded Spill
+ movl 72(%eax), %ecx
+ movl %ecx, -216(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 76(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 80(%eax), %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 84(%eax), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 88(%eax), %ecx
+ movl %ecx, -184(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 92(%eax), %ecx
+ movl %ecx, -188(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, -132(%ebp) # 4-byte Folded Spill
+ movl -144(%ebp), %ecx # 4-byte Reload
+ addl -148(%ebp), %ecx # 4-byte Folded Reload
+ adcl -152(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 24(%eax)
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -156(%ebp), %ecx # 4-byte Folded Reload
+ movl %esi, 28(%eax)
+ adcl -160(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 32(%eax)
+ adcl -164(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 36(%eax)
+ movl -140(%ebp), %ecx # 4-byte Reload
+ adcl -168(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 40(%eax)
+ adcl -192(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 44(%eax)
+ movl -128(%ebp), %ecx # 4-byte Reload
+ adcl -196(%ebp), %ecx # 4-byte Folded Reload
+ movl %edi, 48(%eax)
+ movl -120(%ebp), %edx # 4-byte Reload
+ adcl -200(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ movl -116(%ebp), %ecx # 4-byte Reload
+ adcl -204(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ movl -112(%ebp), %edx # 4-byte Reload
+ adcl -208(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ movl -124(%ebp), %ecx # 4-byte Reload
+ adcl -212(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ movl -132(%ebp), %edx # 4-byte Reload
+ adcl -216(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl %edx, 72(%eax)
+ movl -172(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 76(%eax)
+ movl -176(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 80(%eax)
+ movl -180(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 88(%eax)
+ movl -188(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 92(%eax)
+ addl $220, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end176:
+ .size mcl_fpDbl_mulPre12L, .Lfunc_end176-mcl_fpDbl_mulPre12L
+
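+# mcl_fpDbl_sqrPre12L: squaring analogue of mcl_fpDbl_mulPre12L; the same
+# half-and-half split with both operands equal, again built on three calls to
+# mcl_fpDbl_mulPre6L.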
+ .globl mcl_fpDbl_sqrPre12L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre12L,@function
+mcl_fpDbl_sqrPre12L: # @mcl_fpDbl_sqrPre12L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $220, %esp
+ calll .L177$pb
+.L177$pb:
+ popl %ebx
+.Ltmp28:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp28-.L177$pb), %ebx
+ movl %ebx, -152(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre6L@PLT
+ leal 24(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 48(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6L@PLT
+ movl 44(%edi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ movl 40(%edi), %edx
+ movl 36(%edi), %eax
+ movl (%edi), %ebx
+ movl 4(%edi), %esi
+ addl 24(%edi), %ebx
+ adcl 28(%edi), %esi
+ movl 32(%edi), %ecx
+ adcl 8(%edi), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ adcl 12(%edi), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ adcl 16(%edi), %edx
+ movl %edx, %ecx
+ movl -136(%ebp), %eax # 4-byte Reload
+ adcl 20(%edi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edx
+ movl %edx, -156(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edx
+ popl %eax
+ movl %edx, -124(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edx
+ sbbl %edi, %edi
+ movl %edi, -148(%ebp) # 4-byte Spill
+ movl %ebx, %edi
+ addl %edi, %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+ movl %esi, %edi
+ movl %esi, %eax
+ adcl %edi, %edi
+ movl %edi, -132(%ebp) # 4-byte Spill
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB177_2
+# BB#1:
+ movl $0, -132(%ebp) # 4-byte Folded Spill
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+.LBB177_2:
+ movl -144(%ebp), %esi # 4-byte Reload
+ addl %esi, %esi
+ movl -140(%ebp), %edx # 4-byte Reload
+ adcl %edx, %edx
+ movl %edx, -116(%ebp) # 4-byte Spill
+ movl -120(%ebp), %edx # 4-byte Reload
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB177_3
+# BB#4:
+ movl $0, -116(%ebp) # 4-byte Folded Spill
+ movl $0, -120(%ebp) # 4-byte Folded Spill
+ jmp .LBB177_5
+.LBB177_3:
+ movl %eax, %edx
+ shrl $31, %edx
+ orl %esi, %edx
+ movl %edx, -120(%ebp) # 4-byte Spill
+.LBB177_5:
+ movl -136(%ebp), %edx # 4-byte Reload
+ movl %ecx, %esi
+ addl %esi, %esi
+ adcl %edx, %edx
+ movl -124(%ebp), %edi # 4-byte Reload
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB177_6
+# BB#7:
+ xorl %edx, %edx
+ movl $0, -128(%ebp) # 4-byte Folded Spill
+ movl -140(%ebp), %edi # 4-byte Reload
+ jmp .LBB177_8
+.LBB177_6:
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ movl -140(%ebp), %edi # 4-byte Reload
+ movl %edi, %ecx
+ shrl $31, %ecx
+ orl %esi, %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ movl -124(%ebp), %ecx # 4-byte Reload
+.LBB177_8:
+ movl %edx, -124(%ebp) # 4-byte Spill
+ movl %ebx, -84(%ebp)
+ movl %eax, -80(%ebp)
+ movl -144(%ebp), %esi # 4-byte Reload
+ movl %esi, -76(%ebp)
+ movl %edi, -72(%ebp)
+ movl %ecx, -68(%ebp)
+ movl -136(%ebp), %edx # 4-byte Reload
+ movl %edx, -64(%ebp)
+ movl %ebx, -108(%ebp)
+ movl %eax, -104(%ebp)
+ movl %esi, -100(%ebp)
+ movl %edi, -96(%ebp)
+ movl %ecx, -92(%ebp)
+ movl %edx, -88(%ebp)
+ movl -156(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB177_9
+# BB#10:
+ movl $0, -136(%ebp) # 4-byte Folded Spill
+ jmp .LBB177_11
+.LBB177_9:
+ shrl $31, %edx
+ movl %edx, -136(%ebp) # 4-byte Spill
+.LBB177_11:
+ leal -108(%ebp), %eax
+ movl %eax, 8(%esp)
+ leal -84(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -60(%ebp), %eax
+ movl %eax, (%esp)
+ movl -148(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -152(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre6L@PLT
+ movl -112(%ebp), %eax # 4-byte Reload
+ addl -36(%ebp), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ movl -132(%ebp), %edi # 4-byte Reload
+ adcl -32(%ebp), %edi
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+ adcl -136(%ebp), %esi # 4-byte Folded Reload
+ movl -60(%ebp), %edx
+ movl 8(%ebp), %eax
+ subl (%eax), %edx
+ movl -56(%ebp), %ebx
+ sbbl 4(%eax), %ebx
+ movl -52(%ebp), %ecx
+ sbbl 8(%eax), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ movl -48(%ebp), %ecx
+ sbbl 12(%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -44(%ebp), %ecx
+ sbbl 16(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ecx
+ sbbl 20(%eax), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 28(%eax), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl %edi, -132(%ebp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ movl %ecx, -156(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 36(%eax), %ecx
+ movl %ecx, -160(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 40(%eax), %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 44(%eax), %ecx
+ movl %ecx, -168(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ movl 48(%eax), %ecx
+ movl %ecx, -192(%ebp) # 4-byte Spill
+ subl %ecx, %edx
+ movl 52(%eax), %ecx
+ movl %ecx, -196(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 56(%eax), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ sbbl %ecx, -136(%ebp) # 4-byte Folded Spill
+ movl 60(%eax), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ sbbl %ecx, -144(%ebp) # 4-byte Folded Spill
+ movl 64(%eax), %ecx
+ movl %ecx, -208(%ebp) # 4-byte Spill
+ movl -172(%ebp), %edi # 4-byte Reload
+ sbbl %ecx, %edi
+ movl 68(%eax), %ecx
+ movl %ecx, -212(%ebp) # 4-byte Spill
+ sbbl %ecx, -140(%ebp) # 4-byte Folded Spill
+ movl 72(%eax), %ecx
+ movl %ecx, -216(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 76(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ sbbl %ecx, -132(%ebp) # 4-byte Folded Spill
+ movl 80(%eax), %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 84(%eax), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 88(%eax), %ecx
+ movl %ecx, -184(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 92(%eax), %ecx
+ movl %ecx, -188(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ addl -148(%ebp), %edx # 4-byte Folded Reload
+ adcl -152(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 24(%eax)
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -156(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 28(%eax)
+ movl -144(%ebp), %edx # 4-byte Reload
+ adcl -160(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 32(%eax)
+ adcl -164(%ebp), %edi # 4-byte Folded Reload
+ movl %edx, 36(%eax)
+ movl -140(%ebp), %edx # 4-byte Reload
+ adcl -168(%ebp), %edx # 4-byte Folded Reload
+ movl %edi, 40(%eax)
+ movl -112(%ebp), %ecx # 4-byte Reload
+ adcl -192(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 44(%eax)
+ movl -132(%ebp), %edi # 4-byte Reload
+ adcl -196(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 48(%eax)
+ movl -120(%ebp), %edx # 4-byte Reload
+ adcl -200(%ebp), %edx # 4-byte Folded Reload
+ movl %edi, 52(%eax)
+ movl -116(%ebp), %ecx # 4-byte Reload
+ adcl -204(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ movl -128(%ebp), %edx # 4-byte Reload
+ adcl -208(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ movl -124(%ebp), %ecx # 4-byte Reload
+ adcl -212(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ adcl -216(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl %esi, 72(%eax)
+ movl -172(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 76(%eax)
+ movl -176(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 80(%eax)
+ movl -180(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 88(%eax)
+ movl -188(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 92(%eax)
+ addl $220, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end177:
+ .size mcl_fpDbl_sqrPre12L, .Lfunc_end177-mcl_fpDbl_sqrPre12L
+
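+# mcl_fp_mont12L: word-serial Montgomery multiplication for 12-limb (384-bit)
+# moduli. For each 32-bit word of the multiplier it calls .LmulPv384x32 on the
+# multiplicand, folds the partial product into the accumulator, derives the
+# reduction factor from the per-modulus constant stored just below the modulus
+# pointer (presumably -p^-1 mod 2^32), adds that multiple of the modulus, and
+# finishes with a conditional subtraction of the modulus.
+# Sketch of one loop iteration, assuming the standard CIOS formulation:
+#   t += a * b[i];  m = (t mod 2^32) * n0 mod 2^32;  t = (t + m * p) >> 32;
+#   after all 12 words: if (t >= p) t -= p.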
+ .globl mcl_fp_mont12L
+ .align 16, 0x90
+ .type mcl_fp_mont12L,@function
+mcl_fp_mont12L: # @mcl_fp_mont12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1436, %esp # imm = 0x59C
+ calll .L178$pb
+.L178$pb:
+ popl %ebx
+.Ltmp29:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp29-.L178$pb), %ebx
+ movl 1468(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1384(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 1384(%esp), %ebp
+ movl 1388(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 1432(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 1428(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1424(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1420(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1416(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1412(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1408(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1404(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1400(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1396(%esp), %edi
+ movl 1392(%esp), %esi
+ movl %eax, (%esp)
+ leal 1328(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ addl 1328(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1336(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 1340(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1372(%esp), %esi
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1376(%esp), %ebp
+ sbbl %edi, %edi
+ movl 1464(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1272(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %edi
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1272(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1312(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1316(%esp), %ebp
+ adcl 1320(%esp), %edi
+ sbbl %eax, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1216(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 84(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1216(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1224(%esp), %esi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1228(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1244(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1248(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1252(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1256(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 1260(%esp), %ebp
+ adcl 1264(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1160(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 1160(%esp), %ecx
+ adcl 1164(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1200(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ adcl 1204(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1104(%esp), %ecx
+ movl 1468(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1104(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1140(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1148(%esp), %edi
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1152(%esp), %ebp
+ adcl $0, %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1048(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 1048(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1080(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1088(%esp), %edi
+ adcl 1092(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %esi
+ movl %esi, %eax
+ addl 992(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 996(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1000(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 1004(%esp), %ebp
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1008(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1012(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1016(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1020(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1024(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1028(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1032(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1036(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1040(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl $0, %edi
+ movl 1464(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 936(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 944(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 948(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 960(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 980(%esp), %esi
+ adcl 984(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %edi
+ movl %edi, %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 880(%esp), %eax
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 892(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 912(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 924(%esp), %esi
+ movl %esi, %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 824(%esp), %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 840(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 864(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 768(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 768(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 780(%esp), %ebp
+ adcl 784(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 800(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 808(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 1460(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ movl 44(%esp), %eax # 4-byte Reload
+ addl 712(%esp), %eax
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 716(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 720(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 724(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 728(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 732(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 736(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 740(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 744(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 748(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 752(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 760(%esp), %edi
+ sbbl %ebp, %ebp
+ movl %eax, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 656(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 656(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 660(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 664(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 672(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 676(%esp), %ebp
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 704(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl $0, %edi
+ movl 1464(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 600(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 600(%esp), %ecx
+ adcl 604(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 616(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 620(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 636(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 648(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 44(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 544(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 548(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 552(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 560(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 564(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 568(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 572(%esp), %edi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 576(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 580(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 584(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 588(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 592(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 488(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 512(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 524(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 532(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 536(%esp), %ebp
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 432(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 440(%esp), %edi
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 444(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 480(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 376(%esp), %ecx
+ adcl 380(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 392(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 416(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 320(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 320(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ adcl 336(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 344(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 360(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 264(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 272(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 284(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 288(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 296(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 208(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 224(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 232(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 236(%esp), %edi
+ adcl 240(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 248(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 152(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 164(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 176(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 188(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 196(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 96(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %esi
+ addl 96(%esp), %edi
+ movl 84(%esp), %ebx # 4-byte Reload
+ movl 92(%esp), %eax # 4-byte Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %edx, %edi
+ adcl 108(%esp), %ebx
+ adcl 112(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 132(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 140(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ adcl $0, %esi
+ movl 1468(%esp), %edx
+ subl (%edx), %eax
+ sbbl 4(%edx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ sbbl 8(%edx), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ sbbl 12(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 16(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 20(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 24(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edx), %ecx
+ movl 44(%esp), %edi # 4-byte Reload
+ sbbl 32(%edx), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 36(%edx), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 40(%edx), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ sbbl 44(%edx), %ebp
+ movl %ebp, %edx
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB178_2
+# BB#1:
+ movl %ecx, 52(%esp) # 4-byte Spill
+.LBB178_2:
+ movl %esi, %ecx
+ testb %cl, %cl
+ movl 92(%esp), %ecx # 4-byte Reload
+ jne .LBB178_4
+# BB#3:
+ movl %eax, %ecx
+.LBB178_4:
+ movl 1456(%esp), %eax
+ movl %ecx, (%eax)
+ movl 68(%esp), %edi # 4-byte Reload
+ jne .LBB178_6
+# BB#5:
+ movl 16(%esp), %edi # 4-byte Reload
+.LBB178_6:
+ movl %edi, 4(%eax)
+ movl 64(%esp), %ebp # 4-byte Reload
+ jne .LBB178_8
+# BB#7:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB178_8:
+ movl %ebx, 8(%eax)
+ jne .LBB178_10
+# BB#9:
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+.LBB178_10:
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ jne .LBB178_12
+# BB#11:
+ movl 28(%esp), %ebp # 4-byte Reload
+.LBB178_12:
+ movl %ebp, 16(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB178_14
+# BB#13:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB178_14:
+ movl %ecx, 20(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB178_16
+# BB#15:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB178_16:
+ movl %ecx, 24(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB178_18
+# BB#17:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB178_18:
+ movl %ecx, 32(%eax)
+ movl 60(%esp), %ecx # 4-byte Reload
+ jne .LBB178_20
+# BB#19:
+ movl 80(%esp), %ecx # 4-byte Reload
+.LBB178_20:
+ movl %ecx, 36(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ jne .LBB178_22
+# BB#21:
+ movl 84(%esp), %ecx # 4-byte Reload
+.LBB178_22:
+ movl %ecx, 40(%eax)
+ movl 88(%esp), %ecx # 4-byte Reload
+ jne .LBB178_24
+# BB#23:
+ movl %edx, %ecx
+.LBB178_24:
+ movl %ecx, 44(%eax)
+ addl $1436, %esp # imm = 0x59C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end178:
+ .size mcl_fp_mont12L, .Lfunc_end178-mcl_fp_mont12L
+
+ .globl mcl_fp_montNF12L
+ .align 16, 0x90
+ .type mcl_fp_montNF12L,@function
+mcl_fp_montNF12L: # @mcl_fp_montNF12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1420, %esp # imm = 0x58C
+ calll .L179$pb
+.L179$pb:
+ popl %ebx
+.Ltmp30:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp30-.L179$pb), %ebx
+ movl 1452(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1368(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1368(%esp), %ebp
+ movl 1372(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 1416(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1412(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1408(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1404(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1400(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1396(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1392(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1388(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1384(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1380(%esp), %edi
+ movl 1376(%esp), %esi
+ movl %eax, (%esp)
+ leal 1312(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 1312(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1320(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 1324(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1344(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1356(%esp), %esi
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1360(%esp), %ebp
+ movl 1448(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1256(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1304(%esp), %eax
+ movl 56(%esp), %edx # 4-byte Reload
+ addl 1256(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1260(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1264(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1268(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1272(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 1276(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1280(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 1284(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1288(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1296(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 1300(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, %edi
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1200(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 1200(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 1208(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1244(%esp), %ebp
+ adcl 1248(%esp), %edi
+ movl 1448(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1192(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1144(%esp), %edx
+ adcl 1148(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1152(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1156(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 1160(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1164(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1168(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1172(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1176(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1180(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 1184(%esp), %ebp
+ adcl 1188(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1088(%esp), %ecx
+ movl 1452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ addl 1088(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %esi, %edi
+ adcl 1104(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1124(%esp), %esi
+ adcl 1128(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1136(%esp), %ebp
+ movl 1448(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1032(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1080(%esp), %eax
+ movl 52(%esp), %edx # 4-byte Reload
+ addl 1032(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1036(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1040(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 1044(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1048(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1052(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1056(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1060(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1064(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1072(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1076(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %edi
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 976(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 976(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1004(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1012(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1024(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 920(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 968(%esp), %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ addl 920(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 924(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 928(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 932(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 936(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 940(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 944(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 952(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 956(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 960(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 964(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 864(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 864(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 884(%esp), %ebp
+ adcl 888(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 900(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 912(%esp), %edi
+ movl 1448(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 808(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 856(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 808(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 824(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl 828(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 832(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ addl 752(%esp), %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 760(%esp), %edi
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 764(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 776(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 792(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1448(%esp), %ecx
+ movl %ecx, %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1444(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ movl 744(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ adcl 700(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 704(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 708(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 712(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 716(%esp), %esi
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 720(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 724(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 728(%esp), %edi
+ adcl 732(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 736(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 740(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 640(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 648(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 660(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 668(%esp), %esi
+ adcl 672(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 676(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 584(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 632(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 584(%esp), %ecx
+ adcl 588(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 596(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 608(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 616(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 620(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 528(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 528(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 540(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 564(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 568(%esp), %edi
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 572(%esp), %esi
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 576(%esp), %ebp
+ movl 1448(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 520(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 472(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 508(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 512(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 516(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 416(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 432(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 440(%esp), %ebp
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 444(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 408(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 360(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 372(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 380(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 304(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 312(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 320(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 328(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 296(%esp), %edx
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 248(%esp), %ecx
+ adcl 252(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 260(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 272(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 192(%esp), %esi
+ adcl 196(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 200(%esp), %edi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 204(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 216(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 224(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 184(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 136(%esp), %ecx
+ adcl 140(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 144(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 160(%esp), %edi
+ adcl 164(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 168(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 80(%esp), %ecx
+ movl 1452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ addl 80(%esp), %esi
+ movl 56(%esp), %esi # 4-byte Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 92(%esp), %esi
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 104(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 112(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1452(%esp), %ebp
+ subl (%ebp), %edx
+ movl %ecx, %eax
+ sbbl 4(%ebp), %eax
+ movl %esi, %ebx
+ sbbl 8(%ebp), %ebx
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl 40(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ sbbl 28(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ sbbl 32(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ sbbl 36(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 40(%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 44(%ebp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ movl 76(%esp), %ebp # 4-byte Reload
+ js .LBB179_2
+# BB#1:
+ movl %edx, %ebp
+.LBB179_2:
+ movl 1440(%esp), %edx
+ movl %ebp, (%edx)
+ movl 68(%esp), %edi # 4-byte Reload
+ js .LBB179_4
+# BB#3:
+ movl %eax, %edi
+.LBB179_4:
+ movl %edi, 4(%edx)
+ js .LBB179_6
+# BB#5:
+ movl %ebx, %esi
+.LBB179_6:
+ movl %esi, 8(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB179_8
+# BB#7:
+ movl %ecx, %eax
+.LBB179_8:
+ movl %eax, 12(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB179_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB179_10:
+ movl %eax, 16(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB179_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB179_12:
+ movl %eax, 20(%edx)
+ movl 32(%esp), %eax # 4-byte Reload
+ js .LBB179_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB179_14:
+ movl %eax, 24(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB179_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB179_16:
+ movl %eax, 28(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB179_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB179_18:
+ movl %eax, 32(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB179_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB179_20:
+ movl %eax, 36(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB179_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB179_22:
+ movl %eax, 40(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB179_24
+# BB#23:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB179_24:
+ movl %eax, 44(%edx)
+ addl $1420, %esp # imm = 0x58C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end179:
+ .size mcl_fp_montNF12L, .Lfunc_end179-mcl_fp_montNF12L
+
+ .globl mcl_fp_montRed12L
+ .align 16, 0x90
+ .type mcl_fp_montRed12L,@function
+mcl_fp_montRed12L: # @mcl_fp_montRed12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $828, %esp # imm = 0x33C
+ calll .L180$pb
+.L180$pb:
+ popl %eax
+.Ltmp31:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp31-.L180$pb), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 856(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 852(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 88(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 92(%ecx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 88(%ecx), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 80(%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 68(%ecx), %edi
+ movl %edi, 140(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 148(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 48(%ecx), %edi
+ movl %edi, 152(%esp) # 4-byte Spill
+ movl 44(%ecx), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 32(%ecx), %edi
+ movl 28(%ecx), %esi
+ movl 24(%ecx), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 20(%ecx), %ebp
+ movl 16(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 12(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 8(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 776(%esp), %ecx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ movl 88(%esp), %eax # 4-byte Reload
+ addl 776(%esp), %eax
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 796(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 804(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl 808(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 720(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 720(%esp), %esi
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 724(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 752(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl $0, %edi
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 664(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 692(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 696(%esp), %ebp
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl 712(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 608(%esp), %esi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 612(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 636(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 144(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 552(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, %esi
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 496(%esp), %edi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ebp # 4-byte Reload
+ adcl 528(%esp), %ebp
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 532(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 440(%esp), %esi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl 468(%esp), %ebp
+ movl %ebp, 156(%esp) # 4-byte Spill
+ adcl 472(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %esi # 4-byte Reload
+ adcl 476(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 856(%esp), %eax
+ movl %eax, %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 384(%esp), %edi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 388(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl 400(%esp), %ebp
+ movl 152(%esp), %edi # 4-byte Reload
+ adcl 404(%esp), %edi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl 416(%esp), %esi
+ movl %esi, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %esi # 4-byte Reload
+ adcl 424(%esp), %esi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ movl 100(%esp), %eax # 4-byte Reload
+ addl 328(%esp), %eax
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 336(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl 340(%esp), %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ adcl 344(%esp), %edi
+ movl %edi, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 348(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 352(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 356(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 360(%esp), %ebp
+ adcl 364(%esp), %esi
+ movl %esi, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 368(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 376(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %eax, %esi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 272(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 280(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 284(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 288(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 292(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 296(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 300(%esp), %esi
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 304(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 308(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 312(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 316(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, %ebp
+ movl %eax, %edi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 216(%esp), %edi
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 220(%esp), %ecx
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl 240(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl 248(%esp), %esi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 160(%esp), %edi
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %edx # 4-byte Reload
+ adcl 172(%esp), %edx
+ movl %edx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ebx # 4-byte Reload
+ adcl 176(%esp), %ebx
+ movl %ebx, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 180(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl 188(%esp), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ subl 24(%esp), %edi # 4-byte Folded Reload
+ movl 156(%esp), %esi # 4-byte Reload
+ sbbl 16(%esp), %esi # 4-byte Folded Reload
+ sbbl 20(%esp), %edx # 4-byte Folded Reload
+ sbbl 28(%esp), %ebx # 4-byte Folded Reload
+ sbbl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 140(%esp), %eax # 4-byte Reload
+ sbbl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ sbbl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ sbbl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 132(%esp) # 4-byte Spill
+ sbbl $0, %ebp
+ andl $1, %ebp
+ jne .LBB180_2
+# BB#1:
+ movl %ebx, 148(%esp) # 4-byte Spill
+.LBB180_2:
+ movl %ebp, %ebx
+ testb %bl, %bl
+ movl 152(%esp), %ebx # 4-byte Reload
+ jne .LBB180_4
+# BB#3:
+ movl %edi, %ebx
+.LBB180_4:
+ movl 848(%esp), %edi
+ movl %ebx, (%edi)
+ movl 144(%esp), %ebx # 4-byte Reload
+ jne .LBB180_6
+# BB#5:
+ movl %esi, 156(%esp) # 4-byte Spill
+.LBB180_6:
+ movl 156(%esp), %esi # 4-byte Reload
+ movl %esi, 4(%edi)
+ movl 136(%esp), %esi # 4-byte Reload
+ jne .LBB180_8
+# BB#7:
+ movl %edx, %esi
+.LBB180_8:
+ movl %esi, 8(%edi)
+ movl 148(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%edi)
+ movl 128(%esp), %esi # 4-byte Reload
+ movl 116(%esp), %edx # 4-byte Reload
+ jne .LBB180_10
+# BB#9:
+ movl %ecx, %edx
+.LBB180_10:
+ movl %edx, 16(%edi)
+ movl 120(%esp), %edx # 4-byte Reload
+ movl 140(%esp), %ecx # 4-byte Reload
+ jne .LBB180_12
+# BB#11:
+ movl 84(%esp), %ecx # 4-byte Reload
+.LBB180_12:
+ movl %ecx, 20(%edi)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ jne .LBB180_14
+# BB#13:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB180_14:
+ movl %eax, 24(%edi)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB180_16
+# BB#15:
+ movl 92(%esp), %ebx # 4-byte Reload
+.LBB180_16:
+ movl %ebx, 28(%edi)
+ jne .LBB180_18
+# BB#17:
+ movl 96(%esp), %esi # 4-byte Reload
+.LBB180_18:
+ movl %esi, 32(%edi)
+ jne .LBB180_20
+# BB#19:
+ movl 100(%esp), %edx # 4-byte Reload
+.LBB180_20:
+ movl %edx, 36(%edi)
+ jne .LBB180_22
+# BB#21:
+ movl 112(%esp), %ecx # 4-byte Reload
+.LBB180_22:
+ movl %ecx, 40(%edi)
+ jne .LBB180_24
+# BB#23:
+ movl 132(%esp), %eax # 4-byte Reload
+.LBB180_24:
+ movl %eax, 44(%edi)
+ addl $828, %esp # imm = 0x33C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end180:
+ .size mcl_fp_montRed12L, .Lfunc_end180-mcl_fp_montRed12L
+
+ .globl mcl_fp_addPre12L
+ .align 16, 0x90
+ .type mcl_fp_addPre12L,@function
+mcl_fp_addPre12L: # @mcl_fp_addPre12L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl 40(%eax), %edi
+ movl %esi, 32(%ebx)
+ movl 40(%ecx), %esi
+ adcl %edi, %esi
+ movl %edx, 36(%ebx)
+ movl %esi, 40(%ebx)
+ movl 44(%eax), %eax
+ movl 44(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 44(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end181:
+ .size mcl_fp_addPre12L, .Lfunc_end181-mcl_fp_addPre12L
+
+ .globl mcl_fp_subPre12L
+ .align 16, 0x90
+ .type mcl_fp_subPre12L,@function
+mcl_fp_subPre12L: # @mcl_fp_subPre12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %edi, 32(%ebp)
+ movl 40(%ecx), %edi
+ sbbl %ebx, %edi
+ movl %esi, 36(%ebp)
+ movl %edi, 40(%ebp)
+ movl 44(%edx), %edx
+ movl 44(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 44(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end182:
+ .size mcl_fp_subPre12L, .Lfunc_end182-mcl_fp_subPre12L
+
+ .globl mcl_fp_shr1_12L
+ .align 16, 0x90
+ .type mcl_fp_shr1_12L,@function
+mcl_fp_shr1_12L: # @mcl_fp_shr1_12L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 40(%ecx)
+ shrl %eax
+ movl %eax, 44(%ecx)
+ popl %esi
+ retl
+.Lfunc_end183:
+ .size mcl_fp_shr1_12L, .Lfunc_end183-mcl_fp_shr1_12L
+
+ .globl mcl_fp_add12L
+ .align 16, 0x90
+ .type mcl_fp_add12L,@function
+mcl_fp_add12L: # @mcl_fp_add12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 64(%esp), %ebx
+ movl (%ebx), %edx
+ movl 4(%ebx), %ecx
+ movl 60(%esp), %eax
+ addl (%eax), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 4(%eax), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 8(%ebx), %ecx
+ adcl 8(%eax), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 12(%eax), %edx
+ movl 16(%eax), %ecx
+ adcl 12(%ebx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 16(%ebx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 20(%eax), %ecx
+ adcl 20(%ebx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ adcl 24(%ebx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 28(%eax), %ecx
+ adcl 28(%ebx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 32(%eax), %ebp
+ adcl 32(%ebx), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl 36(%eax), %edi
+ adcl 36(%ebx), %edi
+ movl 40(%eax), %esi
+ adcl 40(%ebx), %esi
+ movl 44(%eax), %edx
+ adcl 44(%ebx), %edx
+ movl 56(%esp), %ebx
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%ebx)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%ebx)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%ebx)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%ebx)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%ebx)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%ebx)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%ebx)
+ movl %ebp, 32(%ebx)
+ movl %edi, 36(%ebx)
+ movl %esi, 40(%ebx)
+ movl %edx, 44(%ebx)
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 68(%esp), %ebp
+ subl (%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ sbbl 4(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ sbbl 8(%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ sbbl 12(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ sbbl 16(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 20(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ sbbl 24(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ sbbl 28(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl (%esp), %eax # 4-byte Reload
+ sbbl 32(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 36(%ebp), %edi
+ sbbl 40(%ebp), %esi
+ sbbl 44(%ebp), %edx
+ sbbl $0, %ecx
+ testb $1, %cl
+ jne .LBB184_2
+# BB#1: # %nocarry
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ebx)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ebx)
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebx)
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebx)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebx)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%ebx)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebx)
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, 32(%ebx)
+ movl %edi, 36(%ebx)
+ movl %esi, 40(%ebx)
+ movl %edx, 44(%ebx)
+.LBB184_2: # %carry
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end184:
+ .size mcl_fp_add12L, .Lfunc_end184-mcl_fp_add12L
+
+ .globl mcl_fp_addNF12L
+ .align 16, 0x90
+ .type mcl_fp_addNF12L,@function
+mcl_fp_addNF12L: # @mcl_fp_addNF12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 116(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ movl 112(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 4(%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 40(%esi), %ebp
+ movl 36(%esi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 20(%esi), %ebx
+ movl 16(%esi), %edi
+ movl 12(%esi), %ecx
+ movl 8(%esi), %eax
+ adcl 8(%edx), %eax
+ adcl 12(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 16(%edx), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 20(%edx), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 24(%edx), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 28(%edx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 32(%edx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 36(%edx), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl %eax, %esi
+ adcl 40(%edx), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 44(%edx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp
+ movl 60(%esp), %edx # 4-byte Reload
+ subl (%ebp), %edx
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 4(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ sbbl 8(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 16(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ sbbl 20(%ebp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edi
+ sbbl 40(%ebp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ sbbl 44(%ebp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ movl 60(%esp), %edi # 4-byte Reload
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ js .LBB185_2
+# BB#1:
+ movl %edx, %edi
+.LBB185_2:
+ movl 108(%esp), %edx
+ movl %edi, (%edx)
+ movl 64(%esp), %edi # 4-byte Reload
+ js .LBB185_4
+# BB#3:
+ movl (%esp), %edi # 4-byte Reload
+.LBB185_4:
+ movl %edi, 4(%edx)
+ movl %eax, %ebp
+ js .LBB185_6
+# BB#5:
+ movl 4(%esp), %esi # 4-byte Reload
+.LBB185_6:
+ movl %esi, 8(%edx)
+ movl %ecx, %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ js .LBB185_8
+# BB#7:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB185_8:
+ movl %ecx, 12(%edx)
+ movl 76(%esp), %ebx # 4-byte Reload
+ movl 84(%esp), %edi # 4-byte Reload
+ js .LBB185_10
+# BB#9:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB185_10:
+ movl %eax, 16(%edx)
+ movl 80(%esp), %ecx # 4-byte Reload
+ js .LBB185_12
+# BB#11:
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+.LBB185_12:
+ movl 56(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%edx)
+ js .LBB185_14
+# BB#13:
+ movl 20(%esp), %ebp # 4-byte Reload
+.LBB185_14:
+ movl %ebp, 24(%edx)
+ js .LBB185_16
+# BB#15:
+ movl 24(%esp), %edi # 4-byte Reload
+.LBB185_16:
+ movl %edi, 28(%edx)
+ js .LBB185_18
+# BB#17:
+ movl 28(%esp), %ebx # 4-byte Reload
+.LBB185_18:
+ movl %ebx, 32(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB185_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB185_20:
+ movl %eax, 36(%edx)
+ js .LBB185_22
+# BB#21:
+ movl 36(%esp), %esi # 4-byte Reload
+.LBB185_22:
+ movl %esi, 40(%edx)
+ js .LBB185_24
+# BB#23:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB185_24:
+ movl %ecx, 44(%edx)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end185:
+ .size mcl_fp_addNF12L, .Lfunc_end185-mcl_fp_addNF12L
+
+ .globl mcl_fp_sub12L
+ .align 16, 0x90
+ .type mcl_fp_sub12L,@function
+mcl_fp_sub12L: # @mcl_fp_sub12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ xorl %ebx, %ebx
+ movl 68(%esp), %edi
+ subl (%edi), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 28(%esi), %edx
+ sbbl 28(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 32(%esi), %ecx
+ sbbl 32(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 40(%esi), %ebp
+ sbbl 40(%edi), %ebp
+ movl 44(%esi), %esi
+ sbbl 44(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 60(%esp), %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl %edx, 28(%ebx)
+ movl %ecx, 32(%ebx)
+ movl %eax, 36(%ebx)
+ movl %ebp, 40(%ebx)
+ movl %esi, 44(%ebx)
+ je .LBB186_2
+# BB#1: # %carry
+ movl %esi, %edi
+ movl 72(%esp), %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl %eax, 36(%ebx)
+ movl 40(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 40(%ebx)
+ movl 44(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 44(%ebx)
+.LBB186_2: # %nocarry
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end186:
+ .size mcl_fp_sub12L, .Lfunc_end186-mcl_fp_sub12L
+
+ .globl mcl_fp_subNF12L
+ .align 16, 0x90
+ .type mcl_fp_subNF12L,@function
+mcl_fp_subNF12L: # @mcl_fp_subNF12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 96(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ movl 100(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ sarl $31, %eax
+ movl %eax, %edx
+ addl %edx, %edx
+ movl %eax, %edi
+ adcl %edi, %edi
+ movl %eax, %ebp
+ adcl %ebp, %ebp
+ movl %eax, %esi
+ adcl %esi, %esi
+ shrl $31, %ecx
+ orl %edx, %ecx
+ movl 104(%esp), %edx
+ andl 12(%edx), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ andl 8(%edx), %ebp
+ andl 4(%edx), %edi
+ andl (%edx), %ecx
+ movl 44(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 40(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 36(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 32(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 28(%edx), %esi
+ andl %eax, %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 24(%edx), %ebx
+ andl %eax, %ebx
+ movl 20(%edx), %esi
+ andl %eax, %esi
+ andl 16(%edx), %eax
+ addl 48(%esp), %ecx # 4-byte Folded Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 92(%esp), %edx
+ movl %ecx, (%edx)
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 4(%edx)
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 8(%edx)
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 12(%edx)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 16(%edx)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 20(%edx)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 24(%edx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 28(%edx)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%edx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%edx)
+ movl %eax, 40(%edx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%edx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end187:
+ .size mcl_fp_subNF12L, .Lfunc_end187-mcl_fp_subNF12L
+
+ .globl mcl_fpDbl_add12L
+ .align 16, 0x90
+ .type mcl_fpDbl_add12L,@function
+mcl_fpDbl_add12L: # @mcl_fpDbl_add12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 116(%esp), %ecx
+ movl 112(%esp), %edi
+ movl 12(%edi), %esi
+ movl 16(%edi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%edi), %ebp
+ movl 108(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%ecx), %esi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 56(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%edi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%edi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebx, %esi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%edi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %esi, 36(%eax)
+ movl 44(%edi), %esi
+ adcl %ebx, %esi
+ movl 48(%ecx), %ebx
+ movl %edx, 40(%eax)
+ movl 48(%edi), %edx
+ adcl %ebx, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 52(%ecx), %ebx
+ movl %esi, 44(%eax)
+ movl 52(%edi), %eax
+ adcl %ebx, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 56(%edi), %eax
+ adcl %ebp, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%ecx), %eax
+ movl 60(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl 64(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl 68(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl 72(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl 76(%edi), %edx
+ adcl %eax, %edx
+ movl 80(%ecx), %esi
+ movl 80(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 84(%ecx), %ebx
+ movl 84(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 88(%ecx), %ebp
+ movl 88(%edi), %ebx
+ adcl %ebp, %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 92(%ecx), %ecx
+ movl 92(%edi), %edi
+ adcl %ecx, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 120(%esp), %ebp
+ movl 72(%esp), %edi # 4-byte Reload
+ subl (%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %edx, %edi
+ sbbl 28(%ebp), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 32(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl 36(%ebp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl 44(%esp), %ebx # 4-byte Reload
+ sbbl 40(%ebp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ sbbl 44(%ebp), %edi
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB188_2
+# BB#1:
+ movl %edi, %ebx
+.LBB188_2:
+ testb %cl, %cl
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB188_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB188_4:
+ movl 108(%esp), %eax
+ movl %ecx, 48(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl %ebp, 64(%eax)
+ movl %edi, 68(%eax)
+ movl %esi, 72(%eax)
+ movl %edx, 76(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ jne .LBB188_6
+# BB#5:
+ movl 32(%esp), %edx # 4-byte Reload
+.LBB188_6:
+ movl %edx, 80(%eax)
+ movl 52(%esp), %edx # 4-byte Reload
+ jne .LBB188_8
+# BB#7:
+ movl 36(%esp), %edx # 4-byte Reload
+.LBB188_8:
+ movl %edx, 84(%eax)
+ jne .LBB188_10
+# BB#9:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB188_10:
+ movl %ecx, 88(%eax)
+ movl %ebx, 92(%eax)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end188:
+ .size mcl_fpDbl_add12L, .Lfunc_end188-mcl_fpDbl_add12L
+
+ .globl mcl_fpDbl_sub12L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub12L,@function
+mcl_fpDbl_sub12L: # @mcl_fpDbl_sub12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $76, %esp
+ movl 100(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %edx
+ movl 104(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %edx
+ movl 8(%esi), %edi
+ sbbl 8(%ebx), %edi
+ movl 96(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%esi), %eax
+ sbbl 12(%ebx), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%esi), %edx
+ sbbl 16(%ebx), %edx
+ movl %edi, 8(%ecx)
+ movl 20(%ebx), %edi
+ movl %eax, 12(%ecx)
+ movl 20(%esi), %eax
+ sbbl %edi, %eax
+ movl 24(%ebx), %edi
+ movl %edx, 16(%ecx)
+ movl 24(%esi), %edx
+ sbbl %edi, %edx
+ movl 28(%ebx), %edi
+ movl %eax, 20(%ecx)
+ movl 28(%esi), %eax
+ sbbl %edi, %eax
+ movl 32(%ebx), %edi
+ movl %edx, 24(%ecx)
+ movl 32(%esi), %edx
+ sbbl %edi, %edx
+ movl 36(%ebx), %edi
+ movl %eax, 28(%ecx)
+ movl 36(%esi), %eax
+ sbbl %edi, %eax
+ movl 40(%ebx), %edi
+ movl %edx, 32(%ecx)
+ movl 40(%esi), %edx
+ sbbl %edi, %edx
+ movl 44(%ebx), %edi
+ movl %eax, 36(%ecx)
+ movl 44(%esi), %eax
+ sbbl %edi, %eax
+ movl 48(%ebx), %edi
+ movl %edx, 40(%ecx)
+ movl 48(%esi), %edx
+ sbbl %edi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 52(%ebx), %edx
+ movl %eax, 44(%ecx)
+ movl 52(%esi), %eax
+ sbbl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 56(%ebx), %eax
+ movl 56(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 60(%ebx), %eax
+ movl 60(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 64(%ebx), %eax
+ movl 64(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 68(%ebx), %eax
+ movl 68(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 72(%ebx), %eax
+ movl 72(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 76(%ebx), %eax
+ movl 76(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 80(%ebx), %eax
+ movl 80(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 84(%ebx), %eax
+ movl 84(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 88(%ebx), %eax
+ movl 88(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 92(%ebx), %eax
+ movl 92(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 108(%esp), %ebp
+ jne .LBB189_1
+# BB#2:
+ movl $0, 36(%esp) # 4-byte Folded Spill
+ jmp .LBB189_3
+.LBB189_1:
+ movl 44(%ebp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+.LBB189_3:
+ testb %al, %al
+ jne .LBB189_4
+# BB#5:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ movl $0, %esi
+ jmp .LBB189_6
+.LBB189_4:
+ movl (%ebp), %esi
+ movl 4(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB189_6:
+ jne .LBB189_7
+# BB#8:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB189_9
+.LBB189_7:
+ movl 40(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB189_9:
+ jne .LBB189_10
+# BB#11:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB189_12
+.LBB189_10:
+ movl 36(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB189_12:
+ jne .LBB189_13
+# BB#14:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB189_15
+.LBB189_13:
+ movl 32(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB189_15:
+ jne .LBB189_16
+# BB#17:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB189_18
+.LBB189_16:
+ movl 28(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB189_18:
+ jne .LBB189_19
+# BB#20:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB189_21
+.LBB189_19:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB189_21:
+ jne .LBB189_22
+# BB#23:
+ movl $0, %ebx
+ jmp .LBB189_24
+.LBB189_22:
+ movl 20(%ebp), %ebx
+.LBB189_24:
+ jne .LBB189_25
+# BB#26:
+ movl $0, %eax
+ jmp .LBB189_27
+.LBB189_25:
+ movl 16(%ebp), %eax
+.LBB189_27:
+ jne .LBB189_28
+# BB#29:
+ movl %ebp, %edx
+ movl $0, %ebp
+ jmp .LBB189_30
+.LBB189_28:
+ movl %ebp, %edx
+ movl 12(%edx), %ebp
+.LBB189_30:
+ jne .LBB189_31
+# BB#32:
+ xorl %edx, %edx
+ jmp .LBB189_33
+.LBB189_31:
+ movl 8(%edx), %edx
+.LBB189_33:
+ addl 32(%esp), %esi # 4-byte Folded Reload
+ movl 12(%esp), %edi # 4-byte Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 52(%ecx)
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, 56(%ecx)
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 60(%ecx)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 68(%ecx)
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 76(%ecx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 80(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 84(%ecx)
+ movl %eax, 88(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%ecx)
+ addl $76, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end189:
+ .size mcl_fpDbl_sub12L, .Lfunc_end189-mcl_fpDbl_sub12L
+
+ .align 16, 0x90
+ .type .LmulPv416x32,@function
+.LmulPv416x32: # @mulPv416x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl %edx, %edi
+ movl 108(%esp), %ebp
+ movl %ebp, %eax
+ mull 48(%edi)
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 44(%edi)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 40(%edi)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 36(%edi)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 32(%edi)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 28(%edi)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 24(%edi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 20(%edi)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 16(%edi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 12(%edi)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 8(%edi)
+ movl %edx, %esi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 4(%edi)
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull (%edi)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%ecx)
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 52(%ecx)
+ movl %ecx, %eax
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end190:
+ .size .LmulPv416x32, .Lfunc_end190-.LmulPv416x32
+
+ .globl mcl_fp_mulUnitPre13L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre13L,@function
+mcl_fp_mulUnitPre13L: # @mcl_fp_mulUnitPre13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+ calll .L191$pb
+.L191$pb:
+ popl %ebx
+.Ltmp32:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp32-.L191$pb), %ebx
+ movl 136(%esp), %eax
+ movl %eax, (%esp)
+ leal 48(%esp), %ecx
+ movl 132(%esp), %edx
+ calll .LmulPv416x32
+ movl 100(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp
+ movl 64(%esp), %ebx
+ movl 60(%esp), %edi
+ movl 56(%esp), %esi
+ movl 48(%esp), %edx
+ movl 52(%esp), %ecx
+ movl 128(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end191:
+ .size mcl_fp_mulUnitPre13L, .Lfunc_end191-mcl_fp_mulUnitPre13L
+
+ .globl mcl_fpDbl_mulPre13L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre13L,@function
+mcl_fpDbl_mulPre13L: # @mcl_fpDbl_mulPre13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $844, %esp # imm = 0x34C
+ calll .L192$pb
+.L192$pb:
+ popl %edi
+.Ltmp33:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp33-.L192$pb), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 872(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 868(%esp), %edx
+ movl %edx, %esi
+ movl %edi, %ebx
+ calll .LmulPv416x32
+ movl 836(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 828(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 784(%esp), %eax
+ movl 788(%esp), %ebp
+ movl 864(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 872(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 728(%esp), %ecx
+ movl %esi, %edx
+ movl %edi, %ebx
+ calll .LmulPv416x32
+ addl 728(%esp), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 780(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 776(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 772(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 764(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 760(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 748(%esp), %edi
+ movl 744(%esp), %esi
+ movl 740(%esp), %edx
+ movl 732(%esp), %eax
+ movl 736(%esp), %ecx
+ movl 864(%esp), %ebp
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 724(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 720(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 716(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 712(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 708(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 696(%esp), %ebx
+ movl 692(%esp), %edi
+ movl 688(%esp), %esi
+ movl 684(%esp), %edx
+ movl 676(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 616(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 616(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 660(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 656(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 652(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 648(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 644(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 640(%esp), %ebx
+ movl 636(%esp), %edi
+ movl 632(%esp), %esi
+ movl 628(%esp), %edx
+ movl 620(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 624(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 560(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 584(%esp), %ebx
+ movl 580(%esp), %edi
+ movl 576(%esp), %esi
+ movl 572(%esp), %edx
+ movl 564(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 568(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 504(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 528(%esp), %ebx
+ movl 524(%esp), %edi
+ movl 520(%esp), %esi
+ movl 516(%esp), %edx
+ movl 508(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 512(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 448(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 448(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 472(%esp), %ebp
+ movl 468(%esp), %edi
+ movl 464(%esp), %esi
+ movl 460(%esp), %edx
+ movl 452(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 456(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 392(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 444(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %ebx
+ movl 412(%esp), %edi
+ movl 408(%esp), %esi
+ movl 404(%esp), %edx
+ movl 396(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 400(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 336(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 336(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 360(%esp), %ebp
+ movl 356(%esp), %edi
+ movl 352(%esp), %esi
+ movl 348(%esp), %edx
+ movl 340(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 280(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 304(%esp), %ebx
+ movl 300(%esp), %edi
+ movl 296(%esp), %esi
+ movl 292(%esp), %edx
+ movl 284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 288(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 224(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 248(%esp), %ebx
+ movl 244(%esp), %edi
+ movl 240(%esp), %esi
+ movl 236(%esp), %edx
+ movl 228(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 232(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %edi
+ movl 44(%edi), %eax
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 868(%esp), %eax
+ movl %eax, %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %esi # 4-byte Reload
+ addl 168(%esp), %esi
+ movl 220(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 204(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 196(%esp), %ebp
+ movl 192(%esp), %ebx
+ movl 188(%esp), %edi
+ movl 184(%esp), %edx
+ movl 180(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 176(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %esi, 44(%eax)
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 104(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 872(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 112(%esp), %esi
+ movl %esi, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 120(%esp), %edi
+ movl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 148(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 144(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 136(%esp), %ebx
+ movl 132(%esp), %esi
+ movl 128(%esp), %edx
+ movl 124(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %ebp, 48(%eax)
+ movl 68(%esp), %ebp # 4-byte Reload
+ movl %ebp, 52(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 56(%eax)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ adcl 104(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 68(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 72(%eax)
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 76(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 80(%eax)
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 84(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl %ecx, 92(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 100(%eax)
+ addl $844, %esp # imm = 0x34C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end192:
+ .size mcl_fpDbl_mulPre13L, .Lfunc_end192-mcl_fpDbl_mulPre13L
+
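+# mcl_fpDbl_sqrPre13L: full (non-reducing) squaring of a 13-limb (416-bit)
+# operand into a 26-limb (832-bit) result. The single source pointer at
+# 868(%esp) is passed to .LmulPv416x32 once per input limb (word offsets
+# 0, 4, ..., 48) and the thirteen partial products are accumulated with adcl
+# chains into the destination at 864(%esp). ("Pre" in this file's naming
+# scheme is read here as "no modular reduction".)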
+ .globl mcl_fpDbl_sqrPre13L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre13L,@function
+mcl_fpDbl_sqrPre13L: # @mcl_fpDbl_sqrPre13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $844, %esp # imm = 0x34C
+ calll .L193$pb
+.L193$pb:
+ popl %ebx
+.Ltmp34:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp34-.L193$pb), %ebx
+ movl %ebx, 108(%esp) # 4-byte Spill
+ movl 868(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl %edx, %edi
+ movl %ebx, %esi
+ calll .LmulPv416x32
+ movl 836(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 828(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 784(%esp), %eax
+ movl 788(%esp), %ebp
+ movl 864(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 728(%esp), %ecx
+ movl %esi, %ebx
+ calll .LmulPv416x32
+ addl 728(%esp), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 780(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 776(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 772(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 764(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 760(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 748(%esp), %edi
+ movl 744(%esp), %esi
+ movl 740(%esp), %edx
+ movl 732(%esp), %eax
+ movl 736(%esp), %ecx
+ movl 864(%esp), %ebp
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 724(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 720(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 716(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 712(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 708(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 696(%esp), %ebx
+ movl 692(%esp), %edi
+ movl 688(%esp), %esi
+ movl 684(%esp), %edx
+ movl 676(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 616(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 616(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 660(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 656(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 652(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 648(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 644(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 640(%esp), %ebx
+ movl 636(%esp), %edi
+ movl 632(%esp), %esi
+ movl 628(%esp), %edx
+ movl 620(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 624(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 560(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 584(%esp), %ebx
+ movl 580(%esp), %edi
+ movl 576(%esp), %esi
+ movl 572(%esp), %edx
+ movl 564(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 568(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 504(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 528(%esp), %ebx
+ movl 524(%esp), %edi
+ movl 520(%esp), %esi
+ movl 516(%esp), %edx
+ movl 508(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 512(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 448(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 448(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 472(%esp), %ebp
+ movl 468(%esp), %edi
+ movl 464(%esp), %esi
+ movl 460(%esp), %edx
+ movl 452(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 456(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 392(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 444(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %ebx
+ movl 412(%esp), %edi
+ movl 408(%esp), %esi
+ movl 404(%esp), %edx
+ movl 396(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 400(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 336(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 336(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 360(%esp), %ebp
+ movl 356(%esp), %edi
+ movl 352(%esp), %esi
+ movl 348(%esp), %edx
+ movl 340(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 280(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 304(%esp), %ebx
+ movl 300(%esp), %edi
+ movl 296(%esp), %esi
+ movl 292(%esp), %edx
+ movl 284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 288(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 224(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 248(%esp), %ebx
+ movl 244(%esp), %edi
+ movl 240(%esp), %esi
+ movl 236(%esp), %edx
+ movl 228(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 232(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 44(%edx), %eax
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %esi # 4-byte Reload
+ addl 168(%esp), %esi
+ movl 220(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 204(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 196(%esp), %ebp
+ movl 192(%esp), %ebx
+ movl 188(%esp), %edi
+ movl 184(%esp), %edx
+ movl 180(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 176(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %esi, 44(%eax)
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 104(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 48(%edx), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 112(%esp), %esi
+ movl %esi, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 120(%esp), %edi
+ movl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 148(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 144(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 136(%esp), %ebx
+ movl 132(%esp), %esi
+ movl 128(%esp), %edx
+ movl 124(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %ebp, 48(%eax)
+ movl 68(%esp), %ebp # 4-byte Reload
+ movl %ebp, 52(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 56(%eax)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ adcl 104(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 68(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 72(%eax)
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 76(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 80(%eax)
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 84(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl %ecx, 92(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 100(%eax)
+ addl $844, %esp # imm = 0x34C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end193:
+ .size mcl_fpDbl_sqrPre13L, .Lfunc_end193-mcl_fpDbl_sqrPre13L
+
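+# mcl_fp_mont13L: Montgomery multiplication for 13-limb (416-bit) operands.
+# Of the stack arguments used below, 1572(%esp) points at the 13-limb vector
+# fed to .LmulPv416x32, 1576(%esp) at the operand whose limbs are scanned one
+# at a time, and 1580(%esp) at the modulus. Each round forms one vector-by-limb
+# product and then folds in a multiple of the modulus chosen to clear the low
+# limb (a CIOS-style interleaved reduction, as read from the generated code).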
+ .globl mcl_fp_mont13L
+ .align 16, 0x90
+ .type mcl_fp_mont13L,@function
+mcl_fp_mont13L: # @mcl_fp_mont13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1548, %esp # imm = 0x60C
+ calll .L194$pb
+.L194$pb:
+ popl %ebx
+.Ltmp35:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp35-.L194$pb), %ebx
+ movl 1580(%esp), %eax
+ movl -4(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
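+# 28(%esp) caches the word stored just below the modulus pointer (-4(p)); it
+# is reused via "imull 28(%esp), %eax" before every reduction call, i.e. it
+# plays the role of the Montgomery constant n0' = -p^{-1} mod 2^32 under the
+# usual convention.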
+ movl 1576(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1488(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1488(%esp), %esi
+ movl 1492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull %edi, %eax
+ movl 1540(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1536(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1532(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1528(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1524(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1520(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1516(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1512(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1508(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1504(%esp), %edi
+ movl 1500(%esp), %ebp
+ movl 1496(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1432(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1432(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1440(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1444(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 1448(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1472(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1484(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ sbbl %esi, %esi
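+# "sbbl %esi, %esi" materializes the carry out of the preceding adcl chain as
+# 0 / 0xffffffff; the later "andl $1, %esi" reduces it to a 0/1 carry word.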
+ movl 1576(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1376(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1376(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1388(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1404(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1412(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 1420(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1428(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1320(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1320(%esp), %ecx
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1328(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1336(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 1340(%esp), %esi
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1344(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 1348(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1352(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1356(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1360(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1364(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1368(%esp), %ebp
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1372(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1264(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1280(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1300(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1308(%esp), %ebp
+ adcl 1312(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 1580(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv416x32
+ movl 84(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1208(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1212(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1216(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 1224(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1228(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 1244(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1248(%esp), %edi
+ adcl 1252(%esp), %ebp
+ movl %ebp, %esi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1256(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1260(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1152(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1152(%esp), %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1188(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 1192(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1200(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1096(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 1096(%esp), %esi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1100(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1104(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 1108(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1112(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1116(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1120(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1124(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1128(%esp), %edi
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1132(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1136(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1140(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1144(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1148(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1040(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 1040(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1064(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 1068(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 1072(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1084(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %edi
+ addl 984(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 996(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1028(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl 1576(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 936(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 944(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 980(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 872(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %edi
+ addl 872(%esp), %ebp
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 884(%esp), %ebp
+ adcl 888(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 904(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1572(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv416x32
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 816(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 824(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 844(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 848(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 856(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 760(%esp), %ecx
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 764(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 768(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 772(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 776(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 784(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 788(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 792(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 796(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 800(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 804(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 808(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 812(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 36(%esp), %eax # 4-byte Reload
+ addl 704(%esp), %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 708(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 712(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 716(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 720(%esp), %ebp
+ adcl 724(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 728(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 732(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 736(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 740(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 744(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 748(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 752(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %eax, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 648(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 652(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 656(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 660(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 664(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 672(%esp), %edi
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 676(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 592(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 600(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 612(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ adcl 616(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 620(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 536(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 536(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 544(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 560(%esp), %esi
+ adcl 564(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 572(%esp), %edi
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 576(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 480(%esp), %ecx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 500(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 512(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 516(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 424(%esp), %esi
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 428(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 432(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 444(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ addl 368(%esp), %ebp
+ adcl 372(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 376(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 392(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl 52(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 312(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 320(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 328(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 336(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 340(%esp), %edi
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 344(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 256(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 268(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 280(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 284(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 288(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 200(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 208(%esp), %ebp
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 212(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 232(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 236(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 144(%esp), %ecx
+ adcl 148(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 152(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 156(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 176(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %edi
+ addl 88(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 92(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 100(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 104(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ebx # 4-byte Reload
+ adcl 112(%esp), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl 116(%esp), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 120(%esp), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ adcl 124(%esp), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 128(%esp), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 132(%esp), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 136(%esp), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ adcl 140(%esp), %ebx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ adcl $0, %edi
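+# Conditional final subtraction: subtract the 13-limb modulus (pointer reloaded
+# into %ebx) from the accumulated result; the borrow collected in %edi drives
+# the jne selects below, so the reduced value is stored only when no borrow
+# occurred and the original sum is kept otherwise.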
+ movl 1580(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %ecx
+ sbbl 8(%ebx), %ebp
+ sbbl 12(%ebx), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ sbbl 20(%ebx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ sbbl 24(%ebx), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ sbbl 28(%ebx), %edx
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 32(%ebx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 36(%ebx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 40(%ebx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 44(%ebx), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ sbbl 48(%ebx), %esi
+ movl %esi, %ebx
+ sbbl $0, %edi
+ andl $1, %edi
+ jne .LBB194_2
+# BB#1:
+ movl %edx, 32(%esp) # 4-byte Spill
+.LBB194_2:
+ movl %edi, %edx
+ testb %dl, %dl
+ movl 80(%esp), %edx # 4-byte Reload
+ jne .LBB194_4
+# BB#3:
+ movl %eax, %edx
+.LBB194_4:
+ movl 1568(%esp), %eax
+ movl %edx, (%eax)
+ movl 64(%esp), %esi # 4-byte Reload
+ jne .LBB194_6
+# BB#5:
+ movl %ecx, %esi
+.LBB194_6:
+ movl %esi, 4(%eax)
+ jne .LBB194_8
+# BB#7:
+ movl %ebp, 76(%esp) # 4-byte Spill
+.LBB194_8:
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB194_10
+# BB#9:
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+.LBB194_10:
+ movl 84(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ jne .LBB194_12
+# BB#11:
+ movl 8(%esp), %ebp # 4-byte Reload
+.LBB194_12:
+ movl %ebp, 16(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB194_14
+# BB#13:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB194_14:
+ movl %ecx, 20(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB194_16
+# BB#15:
+ movl 16(%esp), %ecx # 4-byte Reload
+.LBB194_16:
+ movl %ecx, 24(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB194_18
+# BB#17:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB194_18:
+ movl %ecx, 32(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB194_20
+# BB#19:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB194_20:
+ movl %ecx, 36(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB194_22
+# BB#21:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB194_22:
+ movl %ecx, 40(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ jne .LBB194_24
+# BB#23:
+ movl 72(%esp), %ecx # 4-byte Reload
+.LBB194_24:
+ movl %ecx, 44(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB194_26
+# BB#25:
+ movl %ebx, %ecx
+.LBB194_26:
+ movl %ecx, 48(%eax)
+ addl $1548, %esp # imm = 0x60C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end194:
+ .size mcl_fp_mont13L, .Lfunc_end194-mcl_fp_mont13L
+
+ .globl mcl_fp_montNF13L
+ .align 16, 0x90
+ .type mcl_fp_montNF13L,@function
+mcl_fp_montNF13L: # @mcl_fp_montNF13L
+# BB#0:
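+# 13-limb (416-bit) Montgomery multiplication, presumably (z, x, y, p) as in the
+# other mcl_fp_* routines.  The "NF" variant appears to differ from
+# mcl_fp_mont13L only in the final step, selecting the result with sign tests
+# (sarl/js) rather than a borrow-driven conditional subtraction.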
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1548, %esp # imm = 0x60C
+ calll .L195$pb
+.L195$pb:
+ popl %ebx
+.Ltmp36:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp36-.L195$pb), %ebx
+ movl 1580(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1488(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
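+# .LmulPv416x32 (used throughout) appears to multiply a 13-limb operand, whose
+# pointer is passed in %edx, by the 32-bit word stored at (%esp), leaving the
+# 14-limb product in the stack buffer addressed by %ecx.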
+ movl 1488(%esp), %edi
+ movl 1492(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1540(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1536(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1532(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1528(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1524(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1520(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1516(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1512(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1508(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1504(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1500(%esp), %esi
+ movl 1496(%esp), %ebp
+ movl %eax, (%esp)
+ leal 1432(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1432(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1440(%esp), %ebp
+ adcl 1444(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1448(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1472(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 1484(%esp), %edi
+ movl 1576(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1376(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1428(%esp), %ecx
+ movl 80(%esp), %edx # 4-byte Reload
+ addl 1376(%esp), %edx
+ adcl 1380(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1412(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1424(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1320(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1320(%esp), %esi
+ adcl 1324(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 1340(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1360(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1316(%esp), %eax
+ addl 1264(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1268(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1272(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1276(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 1280(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 1284(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1288(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1296(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 1300(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1304(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1308(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1312(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1208(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 1228(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1244(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1248(%esp), %esi
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 1252(%esp), %edi
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1256(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1152(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1204(%esp), %eax
+ movl 64(%esp), %edx # 4-byte Reload
+ addl 1152(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1156(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1160(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1164(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1168(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1172(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1176(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1180(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1184(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1188(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 1192(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ adcl 1196(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1200(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1096(%esp), %ecx
+ movl 1580(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv416x32
+ addl 1096(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 1116(%esp), %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1120(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1140(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 1148(%esp), %ebp
+ movl 1576(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1040(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1092(%esp), %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ addl 1040(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1044(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1048(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1052(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 1056(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 1060(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1064(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1072(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1076(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1084(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1088(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl %eax, %esi
+ adcl $0, %esi
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 984(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 996(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1008(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1036(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 980(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 932(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 936(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 940(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 944(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 948(%esp), %ebp
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 952(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 956(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 960(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 964(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 968(%esp), %esi
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 972(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 976(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 872(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 872(%esp), %edi
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 876(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 892(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 912(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 868(%esp), %edx
+ addl 816(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 832(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 836(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 860(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 760(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 780(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 784(%esp), %esi
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 788(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 804(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 756(%esp), %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 704(%esp), %ecx
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 708(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 712(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 716(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 720(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 724(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 728(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 732(%esp), %esi
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 736(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 740(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 744(%esp), %ebp
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 748(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 752(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 648(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 676(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 688(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 696(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 644(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 592(%esp), %ecx
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 596(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 624(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 636(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 536(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 536(%esp), %edi
+ adcl 540(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 556(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 564(%esp), %esi
+ adcl 568(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 572(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 532(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 480(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 496(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 504(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 508(%esp), %edi
+ adcl 512(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 528(%esp), %ebp
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 424(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 452(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 460(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 472(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 420(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 368(%esp), %ecx
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 372(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 392(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 400(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 404(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 312(%esp), %esi
+ adcl 316(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 320(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 348(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 308(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 256(%esp), %ecx
+ adcl 260(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 272(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 288(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 200(%esp), %esi
+ adcl 204(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 216(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 228(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 196(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 144(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 152(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 156(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 164(%esp), %ebp
+ adcl 168(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 88(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 92(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 100(%esp), %edi
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 104(%esp), %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 132(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
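+# Final selection for the NF variant: subtract the modulus limb by limb into
+# scratch slots, then test the sign of the last borrow (sarl $31 / js below) to
+# decide, limb by limb, whether the subtracted value or the original sum is
+# written to the output.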
+ movl %eax, %edx
+ movl 1580(%esp), %eax
+ subl (%eax), %edx
+ movl %ecx, %ebp
+ sbbl 4(%eax), %ebp
+ movl %edi, %ecx
+ sbbl 8(%eax), %ecx
+ sbbl 12(%eax), %ebx
+ sbbl 16(%eax), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 20(%eax), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 24(%eax), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 28(%eax), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 32(%eax), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 36(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ sbbl 40(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ sbbl 44(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ sbbl 48(%eax), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl %esi, %eax
+ sarl $31, %eax
+ testl %eax, %eax
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB195_2
+# BB#1:
+ movl %edx, %eax
+.LBB195_2:
+ movl 1568(%esp), %edx
+ movl %eax, (%edx)
+ movl 80(%esp), %esi # 4-byte Reload
+ js .LBB195_4
+# BB#3:
+ movl %ebp, %esi
+.LBB195_4:
+ movl %esi, 4(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB195_6
+# BB#5:
+ movl %ecx, %edi
+.LBB195_6:
+ movl %edi, 8(%edx)
+ js .LBB195_8
+# BB#7:
+ movl %ebx, %eax
+.LBB195_8:
+ movl %eax, 12(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB195_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB195_10:
+ movl %eax, 16(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB195_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB195_12:
+ movl %eax, 20(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB195_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB195_14:
+ movl %eax, 24(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB195_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB195_16:
+ movl %eax, 28(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB195_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB195_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB195_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB195_20:
+ movl %eax, 36(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB195_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB195_22:
+ movl %eax, 40(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB195_24
+# BB#23:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB195_24:
+ movl %eax, 44(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB195_26
+# BB#25:
+ movl 68(%esp), %eax # 4-byte Reload
+.LBB195_26:
+ movl %eax, 48(%edx)
+ addl $1548, %esp # imm = 0x60C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end195:
+ .size mcl_fp_montNF13L, .Lfunc_end195-mcl_fp_montNF13L
+
+ .globl mcl_fp_montRed13L
+ .align 16, 0x90
+ .type mcl_fp_montRed13L,@function
+mcl_fp_montRed13L: # @mcl_fp_montRed13L
+# BB#0:
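+# Montgomery reduction: the double-width (26-limb, 832-bit) input is reduced
+# modulo the 13-limb prime.  Each of the rounds below appears to multiply the
+# modulus by (low limb * constant at -4(p)) via .LmulPv416x32 and add it in,
+# shifting the window up one limb per round.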
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $892, %esp # imm = 0x37C
+ calll .L196$pb
+.L196$pb:
+ popl %eax
+.Ltmp37:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp37-.L196$pb), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 920(%esp), %edx
+ movl -4(%edx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 916(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ imull %eax, %ebx
+ movl 100(%ecx), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 80(%ecx), %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl %esi, 144(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 68(%ecx), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 64(%ecx), %esi
+ movl %esi, 148(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 152(%esp) # 4-byte Spill
+ movl 56(%ecx), %esi
+ movl %esi, 140(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 44(%ecx), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ movl 40(%ecx), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 36(%ecx), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %edi
+ movl 20(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 12(%ecx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 8(%ecx), %esi
+ movl (%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 832(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 76(%esp), %eax # 4-byte Reload
+ addl 832(%esp), %eax
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 836(%esp), %ecx
+ adcl 840(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 856(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 860(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 776(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ andl $1, %esi
+ addl 776(%esp), %edi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, %edi
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 720(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 720(%esp), %esi
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 724(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 664(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 608(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 612(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 552(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 496(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %ebp # 4-byte Reload
+ adcl 532(%esp), %ebp
+ movl 148(%esp), %edi # 4-byte Reload
+ adcl 536(%esp), %edi
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 440(%esp), %esi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl 472(%esp), %ebp
+ movl %ebp, 152(%esp) # 4-byte Spill
+ adcl 476(%esp), %edi
+ movl %edi, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 384(%esp), %esi
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 388(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %ebp # 4-byte Reload
+ adcl 404(%esp), %ebp
+ movl 140(%esp), %edi # 4-byte Reload
+ adcl 408(%esp), %edi
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %esi # 4-byte Reload
+ adcl 420(%esp), %esi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 920(%esp), %eax
+ movl %eax, %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 328(%esp), %eax
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 344(%esp), %ebp
+ movl %ebp, 156(%esp) # 4-byte Spill
+ adcl 348(%esp), %edi
+ movl %edi, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl 360(%esp), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ movl 72(%esp), %esi # 4-byte Reload
+ imull %esi, %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 272(%esp), %edi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl 120(%esp), %edi # 4-byte Reload
+ adcl 280(%esp), %edi
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 284(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 288(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 292(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 296(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 300(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 304(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 308(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 312(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 316(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull %esi, %eax
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 216(%esp), %ebp
+ movl %edi, %ecx
+ adcl 220(%esp), %ecx
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 140(%esp), %ebp # 4-byte Reload
+ adcl 228(%esp), %ebp
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 244(%esp), %edi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 920(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 160(%esp), %esi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl 168(%esp), %ebp
+ movl %ebp, 140(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 172(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp # 4-byte Reload
+ adcl 176(%esp), %ebp
+ movl %ebp, 148(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl %edi, %eax
+ adcl 184(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl 156(%esp), %edi # 4-byte Reload
+ subl 12(%esp), %edi # 4-byte Folded Reload
+ sbbl 4(%esp), %ebx # 4-byte Folded Reload
+ sbbl 8(%esp), %ecx # 4-byte Folded Reload
+ sbbl 16(%esp), %ebp # 4-byte Folded Reload
+ sbbl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 132(%esp), %edx # 4-byte Reload
+ sbbl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 144(%esp), %edx # 4-byte Reload
+ sbbl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 136(%esp), %edx # 4-byte Reload
+ sbbl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 116(%esp), %edx # 4-byte Reload
+ sbbl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 112(%esp), %edx # 4-byte Reload
+ sbbl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ sbbl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ sbbl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 120(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl %esi, %eax
+ sbbl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 124(%esp) # 4-byte Spill
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB196_2
+# BB#1:
+ movl %ebp, 148(%esp) # 4-byte Spill
+.LBB196_2:
+ testb %al, %al
+ movl 156(%esp), %ebp # 4-byte Reload
+ jne .LBB196_4
+# BB#3:
+ movl %edi, %ebp
+.LBB196_4:
+ movl 912(%esp), %edi
+ movl %ebp, (%edi)
+ movl 140(%esp), %ebp # 4-byte Reload
+ jne .LBB196_6
+# BB#5:
+ movl %ebx, %ebp
+.LBB196_6:
+ movl %ebp, 4(%edi)
+ movl 152(%esp), %ebx # 4-byte Reload
+ jne .LBB196_8
+# BB#7:
+ movl %ecx, %ebx
+.LBB196_8:
+ movl %ebx, 8(%edi)
+ movl 148(%esp), %esi # 4-byte Reload
+ movl %esi, 12(%edi)
+ movl 116(%esp), %ebx # 4-byte Reload
+ movl 128(%esp), %esi # 4-byte Reload
+ jne .LBB196_10
+# BB#9:
+ movl 72(%esp), %esi # 4-byte Reload
+.LBB196_10:
+ movl %esi, 16(%edi)
+ movl 112(%esp), %esi # 4-byte Reload
+ movl 132(%esp), %edx # 4-byte Reload
+ jne .LBB196_12
+# BB#11:
+ movl 76(%esp), %edx # 4-byte Reload
+.LBB196_12:
+ movl %edx, 20(%edi)
+ movl 96(%esp), %edx # 4-byte Reload
+ movl 144(%esp), %ecx # 4-byte Reload
+ jne .LBB196_14
+# BB#13:
+ movl 80(%esp), %ecx # 4-byte Reload
+.LBB196_14:
+ movl %ecx, 24(%edi)
+ movl 100(%esp), %ecx # 4-byte Reload
+ movl 136(%esp), %eax # 4-byte Reload
+ jne .LBB196_16
+# BB#15:
+ movl 84(%esp), %eax # 4-byte Reload
+.LBB196_16:
+ movl %eax, 28(%edi)
+ movl 92(%esp), %eax # 4-byte Reload
+ jne .LBB196_18
+# BB#17:
+ movl 88(%esp), %ebx # 4-byte Reload
+.LBB196_18:
+ movl %ebx, 32(%edi)
+ jne .LBB196_20
+# BB#19:
+ movl 104(%esp), %esi # 4-byte Reload
+.LBB196_20:
+ movl %esi, 36(%edi)
+ jne .LBB196_22
+# BB#21:
+ movl 108(%esp), %edx # 4-byte Reload
+.LBB196_22:
+ movl %edx, 40(%edi)
+ jne .LBB196_24
+# BB#23:
+ movl 120(%esp), %ecx # 4-byte Reload
+.LBB196_24:
+ movl %ecx, 44(%edi)
+ jne .LBB196_26
+# BB#25:
+ movl 124(%esp), %eax # 4-byte Reload
+.LBB196_26:
+ movl %eax, 48(%edi)
+ addl $892, %esp # imm = 0x37C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end196:
+ .size mcl_fp_montRed13L, .Lfunc_end196-mcl_fp_montRed13L
+
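+# mcl_fp_addPre13L: 13-limb (416-bit) addition with no modular reduction.
+# The first stack argument receives the limb-wise sum of the second and third
+# arguments; the final carry is returned in %eax.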
+ .globl mcl_fp_addPre13L
+ .align 16, 0x90
+ .type mcl_fp_addPre13L,@function
+mcl_fp_addPre13L: # @mcl_fp_addPre13L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl 40(%eax), %edi
+ movl %esi, 32(%ebx)
+ movl 40(%ecx), %esi
+ adcl %edi, %esi
+ movl 44(%eax), %edi
+ movl %edx, 36(%ebx)
+ movl 44(%ecx), %edx
+ adcl %edi, %edx
+ movl %esi, 40(%ebx)
+ movl %edx, 44(%ebx)
+ movl 48(%eax), %eax
+ movl 48(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 48(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end197:
+ .size mcl_fp_addPre13L, .Lfunc_end197-mcl_fp_addPre13L
+
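+# mcl_fp_subPre13L: 13-limb subtraction with no modular reduction.
+# The first stack argument receives arg2 - arg3; the final borrow is
+# returned in %eax.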
+ .globl mcl_fp_subPre13L
+ .align 16, 0x90
+ .type mcl_fp_subPre13L,@function
+mcl_fp_subPre13L: # @mcl_fp_subPre13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %edi, 32(%ebp)
+ movl 40(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 44(%edx), %ebx
+ movl %esi, 36(%ebp)
+ movl 44(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edi, 40(%ebp)
+ movl %esi, 44(%ebp)
+ movl 48(%edx), %edx
+ movl 48(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 48(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end198:
+ .size mcl_fp_subPre13L, .Lfunc_end198-mcl_fp_subPre13L
+
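+# mcl_fp_shr1_13L: logical right shift of a 13-limb value by one bit.
+# Each limb is combined with the next via shrdl so the low bit of limb i+1
+# becomes the high bit of limb i; the top limb gets a plain shrl.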
+ .globl mcl_fp_shr1_13L
+ .align 16, 0x90
+ .type mcl_fp_shr1_13L,@function
+mcl_fp_shr1_13L: # @mcl_fp_shr1_13L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %eax
+ shrdl $1, %eax, %esi
+ movl %esi, 44(%ecx)
+ shrl %eax
+ movl %eax, 48(%ecx)
+ popl %esi
+ retl
+.Lfunc_end199:
+ .size mcl_fp_shr1_13L, .Lfunc_end199-mcl_fp_shr1_13L
+
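+# mcl_fp_add13L: modular addition over 13 limbs.  The raw sum is written to
+# the output first, then the fourth argument (presumably the field modulus)
+# is subtracted; if that subtraction does not borrow, the reduced value
+# overwrites the raw sum in the %nocarry block.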
+ .globl mcl_fp_add13L
+ .align 16, 0x90
+ .type mcl_fp_add13L,@function
+mcl_fp_add13L: # @mcl_fp_add13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 68(%esp), %ebp
+ movl (%ebp), %ecx
+ movl 4(%ebp), %eax
+ movl 64(%esp), %ebx
+ addl (%ebx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 4(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 8(%ebp), %eax
+ adcl 8(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %eax
+ adcl 12(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 16(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ adcl 20(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 24(%ebx), %eax
+ adcl 24(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 28(%ebx), %eax
+ adcl 28(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 32(%ebx), %eax
+ adcl 32(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 36(%ebx), %ecx
+ adcl 36(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 40(%ebx), %edi
+ adcl 40(%ebp), %edi
+ movl 44(%ebx), %edx
+ adcl 44(%ebp), %edx
+ movl 48(%ebx), %esi
+ adcl 48(%ebp), %esi
+ movl 60(%esp), %ebp
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, (%ebp)
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ebp)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebp)
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebp)
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebp)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebp)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%ebp)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebp)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 32(%ebp)
+ movl %ecx, 36(%ebp)
+ movl %edi, 40(%ebp)
+ movl %edx, 44(%ebp)
+ movl %esi, 48(%ebp)
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl 72(%esp), %ecx
+ subl (%ecx), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ sbbl 4(%ecx), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ebx # 4-byte Reload
+ sbbl 8(%ecx), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %ebx # 4-byte Reload
+ sbbl 12(%ecx), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %ebx # 4-byte Reload
+ sbbl 16(%ecx), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %ebx # 4-byte Reload
+ sbbl 20(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %ebx # 4-byte Reload
+ sbbl 24(%ecx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ecx), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebx # 4-byte Reload
+ sbbl 32(%ecx), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl (%esp), %ebx # 4-byte Reload
+ sbbl 36(%ecx), %ebx
+ sbbl 40(%ecx), %edi
+ sbbl 44(%ecx), %edx
+ sbbl 48(%ecx), %esi
+ sbbl $0, %eax
+ testb $1, %al
+ jne .LBB200_2
+# BB#1: # %nocarry
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ebp)
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ebp)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebp)
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebp)
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebp)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebp)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%ebp)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebp)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 32(%ebp)
+ movl %ebx, 36(%ebp)
+ movl %edi, 40(%ebp)
+ movl %edx, 44(%ebp)
+ movl %esi, 48(%ebp)
+.LBB200_2: # %carry
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end200:
+ .size mcl_fp_add13L, .Lfunc_end200-mcl_fp_add13L
+
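+# mcl_fp_addNF13L: addition variant that does not track a carry out of the
+# top limb; it subtracts the modulus from the sum and selects between sum
+# and sum-modulus from the sign of the top limb of the difference
+# (sarl $31 / js below).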
+ .globl mcl_fp_addNF13L
+ .align 16, 0x90
+ .type mcl_fp_addNF13L,@function
+mcl_fp_addNF13L: # @mcl_fp_addNF13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 128(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ movl 124(%esp), %edx
+ addl (%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 4(%edx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 48(%esi), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 36(%esi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 32(%esi), %ebp
+ movl 28(%esi), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ movl 20(%esi), %ebx
+ movl 16(%esi), %edi
+ movl 12(%esi), %ecx
+ movl 8(%esi), %esi
+ adcl 8(%edx), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 12(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 16(%edx), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 20(%edx), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ adcl 24(%edx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 28(%edx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 32(%edx), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 36(%edx), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 40(%edx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%edx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 48(%edx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 132(%esp), %edx
+ movl 64(%esp), %eax # 4-byte Reload
+ subl (%edx), %eax
+ movl 68(%esp), %ebp # 4-byte Reload
+ sbbl 4(%edx), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ sbbl 8(%edx), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ sbbl 12(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 16(%edx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ sbbl 20(%edx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ sbbl 24(%edx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 32(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ movl %esi, %ecx
+ movl %esi, %ebp
+ sbbl 36(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ movl %esi, %ecx
+ movl %esi, %edi
+ sbbl 40(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 48(%edx), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ movl 64(%esp), %edx # 4-byte Reload
+ js .LBB201_2
+# BB#1:
+ movl %eax, %edx
+.LBB201_2:
+ movl 120(%esp), %esi
+ movl %edx, (%esi)
+ movl 68(%esp), %edx # 4-byte Reload
+ js .LBB201_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+.LBB201_4:
+ movl %edx, 4(%esi)
+ movl %edi, %edx
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB201_6
+# BB#5:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB201_6:
+ movl %eax, 8(%esi)
+ movl %ebp, %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB201_8
+# BB#7:
+ movl 8(%esp), %ebx # 4-byte Reload
+.LBB201_8:
+ movl %ebx, 12(%esi)
+ movl 96(%esp), %ebp # 4-byte Reload
+ movl 56(%esp), %ecx # 4-byte Reload
+ js .LBB201_10
+# BB#9:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB201_10:
+ movl %ecx, 16(%esi)
+ movl 92(%esp), %ecx # 4-byte Reload
+ js .LBB201_12
+# BB#11:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB201_12:
+ movl %eax, 20(%esi)
+ movl 72(%esp), %ebx # 4-byte Reload
+ js .LBB201_14
+# BB#13:
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+.LBB201_14:
+ movl 76(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%esi)
+ js .LBB201_16
+# BB#15:
+ movl 24(%esp), %ebp # 4-byte Reload
+.LBB201_16:
+ movl %ebp, 28(%esi)
+ js .LBB201_18
+# BB#17:
+ movl 28(%esp), %ebx # 4-byte Reload
+.LBB201_18:
+ movl %ebx, 32(%esi)
+ js .LBB201_20
+# BB#19:
+ movl 32(%esp), %edi # 4-byte Reload
+.LBB201_20:
+ movl %edi, 36(%esi)
+ js .LBB201_22
+# BB#21:
+ movl 36(%esp), %edx # 4-byte Reload
+.LBB201_22:
+ movl %edx, 40(%esi)
+ js .LBB201_24
+# BB#23:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB201_24:
+ movl %ecx, 44(%esi)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB201_26
+# BB#25:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB201_26:
+ movl %eax, 48(%esi)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end201:
+ .size mcl_fp_addNF13L, .Lfunc_end201-mcl_fp_addNF13L
+
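+# mcl_fp_sub13L: modular subtraction over 13 limbs.  The raw difference is
+# written to the output; if the subtraction borrowed, the fourth argument
+# (presumably the field modulus) is added back in the %carry block.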
+ .globl mcl_fp_sub13L
+ .align 16, 0x90
+ .type mcl_fp_sub13L,@function
+mcl_fp_sub13L: # @mcl_fp_sub13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl 68(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 72(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 32(%esi), %edx
+ sbbl 32(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 36(%esi), %ecx
+ sbbl 36(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ sbbl 40(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 44(%esi), %ebp
+ sbbl 44(%edi), %ebp
+ movl 48(%esi), %esi
+ sbbl 48(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 64(%esp), %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl %edx, 32(%ebx)
+ movl %ecx, 36(%ebx)
+ movl %eax, 40(%ebx)
+ movl %ebp, 44(%ebx)
+ movl %esi, 48(%ebx)
+ je .LBB202_2
+# BB#1: # %carry
+ movl %esi, %edi
+ movl 76(%esp), %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl %ecx, 40(%ebx)
+ movl 44(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 44(%ebx)
+ movl 48(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 48(%ebx)
+.LBB202_2: # %nocarry
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end202:
+ .size mcl_fp_sub13L, .Lfunc_end202-mcl_fp_sub13L
+
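+# mcl_fp_subNF13L: branch-free modular subtraction.  After computing x - y,
+# the sign of the top limb is smeared into a mask (sarl $31) that is ANDed
+# with the modulus limbs, so the modulus is added back only when the
+# subtraction went negative.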
+ .globl mcl_fp_subNF13L
+ .align 16, 0x90
+ .type mcl_fp_subNF13L,@function
+mcl_fp_subNF13L: # @mcl_fp_subNF13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $84, %esp
+ movl 108(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ movl 112(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 32(%ecx), %ebp
+ movl 28(%ecx), %ebx
+ movl 24(%ecx), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 24(%edi), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ sbbl 32(%edi), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ sbbl 48(%edi), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edx, %eax
+ sarl $31, %edi
+ movl %edi, %edx
+ shldl $1, %eax, %edx
+ movl 116(%esp), %esi
+ movl 4(%esi), %eax
+ andl %edx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ andl (%esi), %edx
+ movl 48(%esi), %eax
+ andl %edi, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ andl %edi, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ andl %edi, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ andl %edi, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ andl %edi, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ andl %edi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ andl %edi, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 20(%esi), %ebp
+ andl %edi, %ebp
+ movl 16(%esi), %ebx
+ andl %edi, %ebx
+ movl 12(%esi), %ecx
+ andl %edi, %ecx
+ roll %edi
+ andl 8(%esi), %edi
+ addl 56(%esp), %edx # 4-byte Folded Reload
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl 104(%esp), %esi
+ movl %edx, (%esi)
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %eax, 4(%esi)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 8(%esi)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ecx, 12(%esi)
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebx, 16(%esi)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 20(%esi)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%esi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%esi)
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%esi)
+ movl %eax, 44(%esi)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ addl $84, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end203:
+ .size mcl_fp_subNF13L, .Lfunc_end203-mcl_fp_subNF13L
+
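+# mcl_fpDbl_add13L: addition of two 26-limb (double-width) values.  The low
+# 13 limbs of the sum are stored directly; the high 13 limbs are then
+# conditionally reduced by subtracting the modulus when no borrow occurs.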
+ .globl mcl_fpDbl_add13L
+ .align 16, 0x90
+ .type mcl_fpDbl_add13L,@function
+mcl_fpDbl_add13L: # @mcl_fpDbl_add13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $96, %esp
+ movl 124(%esp), %ecx
+ movl 120(%esp), %esi
+ movl 12(%esi), %edi
+ movl 16(%esi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%esi), %ebp
+ movl 116(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%esi), %ebp
+ adcl 8(%esi), %ebx
+ adcl 12(%ecx), %edi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 60(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %edi, 12(%eax)
+ movl 20(%esi), %edi
+ adcl %ebx, %edi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%esi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %edi, 20(%eax)
+ movl 28(%esi), %edi
+ adcl %ebx, %edi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%esi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %edi, 28(%eax)
+ movl 36(%esi), %edi
+ adcl %ebx, %edi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%esi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %edi, 36(%eax)
+ movl 44(%esi), %edi
+ adcl %ebx, %edi
+ movl 48(%ecx), %ebx
+ movl %edx, 40(%eax)
+ movl 48(%esi), %edx
+ adcl %ebx, %edx
+ movl 52(%ecx), %ebx
+ movl %edi, 44(%eax)
+ movl 52(%esi), %edi
+ adcl %ebx, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 56(%ecx), %edi
+ movl %edx, 48(%eax)
+ movl 56(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 64(%ecx), %edx
+ movl 64(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 68(%ecx), %edx
+ movl 68(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 72(%ecx), %edx
+ movl 72(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%ecx), %edx
+ movl 76(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%ecx), %edx
+ movl 80(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%ecx), %edx
+ movl 84(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%ecx), %edx
+ movl 88(%esi), %edi
+ adcl %edx, %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 92(%ecx), %edx
+ movl 92(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 96(%ecx), %edx
+ movl 96(%esi), %ebx
+ adcl %edx, %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 100(%ecx), %ecx
+ movl 100(%esi), %esi
+ adcl %ecx, %esi
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 128(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ subl (%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ sbbl 36(%ebp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ sbbl 40(%ebp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl %esi, %ebx
+ sbbl 44(%ebp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %ecx
+ sbbl 48(%ebp), %ecx
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB204_2
+# BB#1:
+ movl %ecx, %ebx
+.LBB204_2:
+ testb %dl, %dl
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB204_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB204_4:
+ movl 116(%esp), %eax
+ movl %ecx, 52(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl 88(%esp), %ecx # 4-byte Reload
+ movl %ecx, 64(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ movl %ecx, 68(%eax)
+ movl %ebp, 72(%eax)
+ movl %edi, 76(%eax)
+ movl %esi, 80(%eax)
+ movl %edx, 84(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl 52(%esp), %edx # 4-byte Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ jne .LBB204_6
+# BB#5:
+ movl 36(%esp), %esi # 4-byte Reload
+.LBB204_6:
+ movl %esi, 88(%eax)
+ jne .LBB204_8
+# BB#7:
+ movl 40(%esp), %edx # 4-byte Reload
+.LBB204_8:
+ movl %edx, 92(%eax)
+ jne .LBB204_10
+# BB#9:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB204_10:
+ movl %ecx, 96(%eax)
+ movl %ebx, 100(%eax)
+ addl $96, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end204:
+ .size mcl_fpDbl_add13L, .Lfunc_end204-mcl_fpDbl_add13L
+
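+# mcl_fpDbl_sub13L: subtraction of two 26-limb (double-width) values.  The
+# low 13 limbs are stored directly; for the high half, the modulus (or zero)
+# is selected limb by limb from the borrow flag and added back.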
+ .globl mcl_fpDbl_sub13L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub13L,@function
+mcl_fpDbl_sub13L: # @mcl_fpDbl_sub13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $84, %esp
+ movl 108(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %edx
+ movl 112(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %edx
+ movl 8(%edi), %esi
+ sbbl 8(%ebx), %esi
+ movl 104(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edi), %eax
+ sbbl 12(%ebx), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%edi), %edx
+ sbbl 16(%ebx), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebx), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%edi), %eax
+ sbbl %esi, %eax
+ movl 24(%ebx), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%edi), %edx
+ sbbl %esi, %edx
+ movl 28(%ebx), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%edi), %eax
+ sbbl %esi, %eax
+ movl 32(%ebx), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%edi), %edx
+ sbbl %esi, %edx
+ movl 36(%ebx), %esi
+ movl %eax, 28(%ecx)
+ movl 36(%edi), %eax
+ sbbl %esi, %eax
+ movl 40(%ebx), %esi
+ movl %edx, 32(%ecx)
+ movl 40(%edi), %edx
+ sbbl %esi, %edx
+ movl 44(%ebx), %esi
+ movl %eax, 36(%ecx)
+ movl 44(%edi), %eax
+ sbbl %esi, %eax
+ movl 48(%ebx), %esi
+ movl %edx, 40(%ecx)
+ movl 48(%edi), %edx
+ sbbl %esi, %edx
+ movl 52(%ebx), %esi
+ movl %eax, 44(%ecx)
+ movl 52(%edi), %eax
+ sbbl %esi, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 56(%ebx), %eax
+ movl %edx, 48(%ecx)
+ movl 56(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 60(%ebx), %eax
+ movl 60(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 64(%ebx), %eax
+ movl 64(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 68(%ebx), %eax
+ movl 68(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 72(%ebx), %eax
+ movl 72(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 76(%ebx), %eax
+ movl 76(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 80(%ebx), %eax
+ movl 80(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 84(%ebx), %eax
+ movl 84(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 88(%ebx), %eax
+ movl 88(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 92(%ebx), %eax
+ movl 92(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 96(%ebx), %eax
+ movl 96(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 100(%ebx), %eax
+ movl 100(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 116(%esp), %edi
+ jne .LBB205_1
+# BB#2:
+ movl $0, 44(%esp) # 4-byte Folded Spill
+ jmp .LBB205_3
+.LBB205_1:
+ movl 48(%edi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+.LBB205_3:
+ testb %al, %al
+ jne .LBB205_4
+# BB#5:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ movl $0, %ebx
+ jmp .LBB205_6
+.LBB205_4:
+ movl (%edi), %ebx
+ movl 4(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB205_6:
+ jne .LBB205_7
+# BB#8:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ jmp .LBB205_9
+.LBB205_7:
+ movl 44(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB205_9:
+ jne .LBB205_10
+# BB#11:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB205_12
+.LBB205_10:
+ movl 40(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB205_12:
+ jne .LBB205_13
+# BB#14:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB205_15
+.LBB205_13:
+ movl 36(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB205_15:
+ jne .LBB205_16
+# BB#17:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB205_18
+.LBB205_16:
+ movl 32(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB205_18:
+ jne .LBB205_19
+# BB#20:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB205_21
+.LBB205_19:
+ movl 28(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB205_21:
+ jne .LBB205_22
+# BB#23:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB205_24
+.LBB205_22:
+ movl 24(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB205_24:
+ jne .LBB205_25
+# BB#26:
+ movl $0, %eax
+ jmp .LBB205_27
+.LBB205_25:
+ movl 20(%edi), %eax
+.LBB205_27:
+ jne .LBB205_28
+# BB#29:
+ movl $0, %edx
+ jmp .LBB205_30
+.LBB205_28:
+ movl 16(%edi), %edx
+.LBB205_30:
+ jne .LBB205_31
+# BB#32:
+ movl $0, %esi
+ jmp .LBB205_33
+.LBB205_31:
+ movl 12(%edi), %esi
+.LBB205_33:
+ jne .LBB205_34
+# BB#35:
+ xorl %edi, %edi
+ jmp .LBB205_36
+.LBB205_34:
+ movl 8(%edi), %edi
+.LBB205_36:
+ addl 36(%esp), %ebx # 4-byte Folded Reload
+ movl 16(%esp), %ebp # 4-byte Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %ebp, 56(%ecx)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 60(%ecx)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 64(%ecx)
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 68(%ecx)
+ movl (%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 76(%ecx)
+ movl 8(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 80(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 84(%ecx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 88(%ecx)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl %eax, 96(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%ecx)
+ addl $84, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end205:
+ .size mcl_fpDbl_sub13L, .Lfunc_end205-mcl_fpDbl_sub13L
+
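+# .LmulPv448x32: local helper that multiplies a 14-limb (448-bit) operand by
+# a 32-bit scalar.  Non-standard calling convention: %ecx points at the
+# 15-limb output buffer, %edx at the operand, and the scalar is the first
+# stack argument; the buffer address is returned in %eax.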
+ .align 16, 0x90
+ .type .LmulPv448x32,@function
+.LmulPv448x32: # @mulPv448x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $96, %esp
+ movl %edx, %edi
+ movl 116(%esp), %esi
+ movl %esi, %eax
+ mull 52(%edi)
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 48(%edi)
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 44(%edi)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 40(%edi)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 36(%edi)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 32(%edi)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 28(%edi)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 24(%edi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 20(%edi)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 16(%edi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 12(%edi)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 8(%edi)
+ movl %edx, %ebx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 4(%edi)
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull (%edi)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%ecx)
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 56(%ecx)
+ movl %ecx, %eax
+ addl $96, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end206:
+ .size .LmulPv448x32, .Lfunc_end206-.LmulPv448x32
+
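+# mcl_fp_mulUnitPre14L(z, x, y): multiply the 14-limb operand x by the 32-bit
+# unit y; the 15-limb product is built in a stack buffer by .LmulPv448x32 and
+# then copied out to z.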
+ .globl mcl_fp_mulUnitPre14L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre14L,@function
+mcl_fp_mulUnitPre14L: # @mcl_fp_mulUnitPre14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+ calll .L207$pb
+.L207$pb:
+ popl %ebx
+.Ltmp38:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp38-.L207$pb), %ebx
+ movl 136(%esp), %eax
+ movl %eax, (%esp)
+ leal 48(%esp), %ecx
+ movl 132(%esp), %edx
+ calll .LmulPv448x32
+ movl 104(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp
+ movl 64(%esp), %ebx
+ movl 60(%esp), %edi
+ movl 56(%esp), %esi
+ movl 48(%esp), %edx
+ movl 52(%esp), %ecx
+ movl 128(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end207:
+ .size mcl_fp_mulUnitPre14L, .Lfunc_end207-mcl_fp_mulUnitPre14L
+
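+# mcl_fpDbl_mulPre14L(z, x, y): full 28-limb product of two 14-limb operands.
+# One level of Karatsuba: mcl_fpDbl_mulPre7L is called on the low halves, the
+# high halves, and the 7-limb half-sums, and the three partial products are
+# recombined with the add/sub carry chains below.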
+ .globl mcl_fpDbl_mulPre14L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre14L,@function
+mcl_fpDbl_mulPre14L: # @mcl_fpDbl_mulPre14L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $268, %esp # imm = 0x10C
+ calll .L208$pb
+.L208$pb:
+ popl %ebx
+.Ltmp39:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp39-.L208$pb), %ebx
+ movl %ebx, -192(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl %esi, 8(%esp)
+ movl 12(%ebp), %edi
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre7L@PLT
+ leal 28(%esi), %eax
+ movl %eax, 8(%esp)
+ leal 28(%edi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 56(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre7L@PLT
+ movl 44(%edi), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl 40(%edi), %eax
+ movl 36(%edi), %edx
+ movl (%edi), %edi
+ movl 12(%ebp), %ecx
+ movl 4(%ecx), %ecx
+ movl 12(%ebp), %ebx
+ addl 28(%ebx), %edi
+ movl %edi, -180(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ adcl 32(%edi), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ adcl 8(%edi), %edx
+ movl %edx, -212(%ebp) # 4-byte Spill
+ adcl 12(%edi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl 16(%edi), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl %eax, %ebx
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ movl (%esi), %eax
+ addl 28(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ movl 4(%esi), %eax
+ adcl 32(%esi), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl 36(%esi), %eax
+ adcl 8(%esi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl 40(%esi), %eax
+ adcl 12(%esi), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl 44(%esi), %eax
+ adcl 16(%esi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ movl 48(%esi), %ecx
+ adcl 20(%esi), %ecx
+ movl 52(%esi), %eax
+ adcl 24(%esi), %eax
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %esi
+ popl %eax
+ movl %esi, -220(%ebp) # 4-byte Spill
+ movl %ebx, %esi
+ movl %edx, -184(%ebp) # 4-byte Spill
+ movl -180(%ebp), %edx # 4-byte Reload
+ movl %edx, -188(%ebp) # 4-byte Spill
+ jb .LBB208_2
+# BB#1:
+ xorl %esi, %esi
+ movl $0, -184(%ebp) # 4-byte Folded Spill
+ movl $0, -188(%ebp) # 4-byte Folded Spill
+.LBB208_2:
+ movl %esi, -204(%ebp) # 4-byte Spill
+ movl 52(%edi), %esi
+ movl 48(%edi), %ebx
+ movl -128(%ebp), %edx # 4-byte Reload
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 20(%edi), %ebx
+ movl %ebx, -160(%ebp) # 4-byte Spill
+ adcl 24(%edi), %esi
+ movl %esi, -208(%ebp) # 4-byte Spill
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ movl -176(%ebp), %esi # 4-byte Reload
+ movl %esi, -128(%ebp) # 4-byte Spill
+ movl -172(%ebp), %esi # 4-byte Reload
+ movl %esi, -132(%ebp) # 4-byte Spill
+ movl -168(%ebp), %esi # 4-byte Reload
+ movl %esi, -136(%ebp) # 4-byte Spill
+ movl -164(%ebp), %esi # 4-byte Reload
+ movl %esi, -140(%ebp) # 4-byte Spill
+ movl -216(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -144(%ebp) # 4-byte Spill
+ jb .LBB208_4
+# BB#3:
+ movl $0, -148(%ebp) # 4-byte Folded Spill
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+ movl $0, -128(%ebp) # 4-byte Folded Spill
+ movl $0, -132(%ebp) # 4-byte Folded Spill
+ movl $0, -136(%ebp) # 4-byte Folded Spill
+ movl $0, -140(%ebp) # 4-byte Folded Spill
+ movl $0, -144(%ebp) # 4-byte Folded Spill
+.LBB208_4:
+ movl -180(%ebp), %edx # 4-byte Reload
+ movl %edx, -96(%ebp)
+ movl -200(%ebp), %esi # 4-byte Reload
+ movl %esi, -92(%ebp)
+ movl -212(%ebp), %edx # 4-byte Reload
+ movl %edx, -88(%ebp)
+ movl -196(%ebp), %edi # 4-byte Reload
+ movl %edi, -84(%ebp)
+ movl -156(%ebp), %edx # 4-byte Reload
+ movl %edx, -80(%ebp)
+ movl %ebx, -124(%ebp)
+ movl -164(%ebp), %edx # 4-byte Reload
+ movl %edx, -120(%ebp)
+ movl -168(%ebp), %edx # 4-byte Reload
+ movl %edx, -116(%ebp)
+ movl -172(%ebp), %edx # 4-byte Reload
+ movl %edx, -112(%ebp)
+ movl -176(%ebp), %edx # 4-byte Reload
+ movl %edx, -108(%ebp)
+ movl %ecx, -104(%ebp)
+ movl %edi, %ebx
+ movl %esi, %edi
+ movl %eax, -100(%ebp)
+ sbbl %edx, %edx
+ movl -160(%ebp), %eax # 4-byte Reload
+ movl %eax, -76(%ebp)
+ movl -208(%ebp), %esi # 4-byte Reload
+ movl %esi, -72(%ebp)
+ movl -220(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB208_6
+# BB#5:
+ movl $0, %esi
+ movl $0, %eax
+ movl $0, %ebx
+ movl $0, %edi
+.LBB208_6:
+ movl %eax, -160(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+ leal -124(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -96(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -68(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl -188(%ebp), %eax # 4-byte Reload
+ addl %eax, -144(%ebp) # 4-byte Folded Spill
+ adcl %edi, -140(%ebp) # 4-byte Folded Spill
+ movl -184(%ebp), %eax # 4-byte Reload
+ adcl %eax, -136(%ebp) # 4-byte Folded Spill
+ adcl %ebx, -132(%ebp) # 4-byte Folded Spill
+ movl -204(%ebp), %eax # 4-byte Reload
+ adcl %eax, -128(%ebp) # 4-byte Folded Spill
+ movl -152(%ebp), %edi # 4-byte Reload
+ adcl -160(%ebp), %edi # 4-byte Folded Reload
+ adcl %esi, -148(%ebp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -156(%ebp) # 4-byte Spill
+ movl -192(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre7L@PLT
+ movl -144(%ebp), %eax # 4-byte Reload
+ addl -40(%ebp), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ movl -140(%ebp), %eax # 4-byte Reload
+ adcl -36(%ebp), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ movl -136(%ebp), %eax # 4-byte Reload
+ adcl -32(%ebp), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ movl -132(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ adcl -20(%ebp), %edi
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ adcl %esi, -156(%ebp) # 4-byte Folded Spill
+ movl -68(%ebp), %eax
+ movl 8(%ebp), %esi
+ subl (%esi), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl -64(%ebp), %ecx
+ sbbl 4(%esi), %ecx
+ movl -60(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl -56(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl -52(%ebp), %ebx
+ sbbl 16(%esi), %ebx
+ movl -48(%ebp), %eax
+ sbbl 20(%esi), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl -44(%ebp), %eax
+ sbbl 24(%esi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl 28(%esi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 36(%esi), %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -188(%ebp) # 4-byte Spill
+ sbbl %eax, -132(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %eax
+ movl %eax, -192(%ebp) # 4-byte Spill
+ sbbl %eax, -128(%ebp) # 4-byte Folded Spill
+ movl 48(%esi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl %edi, -152(%ebp) # 4-byte Spill
+ movl 52(%esi), %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ movl -148(%ebp), %edi # 4-byte Reload
+ sbbl %eax, %edi
+ sbbl $0, -156(%ebp) # 4-byte Folded Spill
+ movl 56(%esi), %eax
+ movl %eax, -228(%ebp) # 4-byte Spill
+ subl %eax, -172(%ebp) # 4-byte Folded Spill
+ movl 60(%esi), %eax
+ movl %eax, -232(%ebp) # 4-byte Spill
+ sbbl %eax, %ecx
+ movl 64(%esi), %eax
+ movl %eax, -236(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -240(%ebp) # 4-byte Spill
+ sbbl %eax, %edx
+ movl 72(%esi), %eax
+ movl %eax, -244(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 76(%esi), %eax
+ movl %eax, -248(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 80(%esi), %eax
+ movl %eax, -252(%ebp) # 4-byte Spill
+ sbbl %eax, -168(%ebp) # 4-byte Folded Spill
+ movl 84(%esi), %eax
+ movl %eax, -256(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 88(%esi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 92(%esi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 96(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ sbbl %eax, -132(%ebp) # 4-byte Folded Spill
+ movl 100(%esi), %eax
+ movl %eax, -220(%ebp) # 4-byte Spill
+ sbbl %eax, -128(%ebp) # 4-byte Folded Spill
+ movl 104(%esi), %eax
+ movl %eax, -224(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl 108(%esi), %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl %edi, -148(%ebp) # 4-byte Spill
+ movl -156(%ebp), %edi # 4-byte Reload
+ sbbl $0, %edi
+ movl -172(%ebp), %eax # 4-byte Reload
+ addl -176(%ebp), %eax # 4-byte Folded Reload
+ adcl -180(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -184(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%esi)
+ adcl -188(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ adcl -192(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 40(%esi)
+ movl -164(%ebp), %eax # 4-byte Reload
+ adcl -196(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 44(%esi)
+ movl -168(%ebp), %ecx # 4-byte Reload
+ adcl -200(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -228(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl -140(%ebp), %ecx # 4-byte Reload
+ adcl -232(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 56(%esi)
+ movl -136(%ebp), %eax # 4-byte Reload
+ adcl -236(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 60(%esi)
+ movl -132(%ebp), %ecx # 4-byte Reload
+ adcl -240(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 64(%esi)
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -244(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 68(%esi)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -248(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 72(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -252(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 76(%esi)
+ adcl -256(%ebp), %edi # 4-byte Folded Reload
+ movl %eax, 80(%esi)
+ movl %edi, 84(%esi)
+ movl -208(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 88(%esi)
+ movl -212(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 92(%esi)
+ movl -216(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 96(%esi)
+ movl -220(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 100(%esi)
+ movl -224(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 104(%esi)
+ movl -204(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 108(%esi)
+ addl $268, %esp # imm = 0x10C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end208:
+ .size mcl_fpDbl_mulPre14L, .Lfunc_end208-mcl_fpDbl_mulPre14L
+
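+# mcl_fpDbl_sqrPre14L(y, x): 28-limb square of a 14-limb operand, using the
+# same one-level Karatsuba layout as mcl_fpDbl_mulPre14L (three calls to
+# mcl_fpDbl_mulPre7L); the conditional shld sequences below build the doubled
+# half-sum that corrects for a carry out of the 7-limb half addition.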
+ .globl mcl_fpDbl_sqrPre14L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre14L,@function
+mcl_fpDbl_sqrPre14L: # @mcl_fpDbl_sqrPre14L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $268, %esp # imm = 0x10C
+ calll .L209$pb
+.L209$pb:
+ popl %ebx
+.Ltmp40:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp40-.L209$pb), %ebx
+ movl %ebx, -172(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre7L@PLT
+ leal 28(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 56(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre7L@PLT
+ movl 48(%edi), %eax
+ movl 44(%edi), %ecx
+ movl 36(%edi), %edx
+ movl (%edi), %esi
+ movl 4(%edi), %ebx
+ addl 28(%edi), %esi
+ adcl 32(%edi), %ebx
+ movl %ebx, -164(%ebp) # 4-byte Spill
+ adcl 8(%edi), %edx
+ movl %edx, -160(%ebp) # 4-byte Spill
+ movl 40(%edi), %edx
+ adcl 12(%edi), %edx
+ adcl 16(%edi), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ adcl 20(%edi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ movl 52(%edi), %ecx
+ adcl 24(%edi), %ecx
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edi
+ seto %al
+ lahf
+ movl %eax, %eax
+ sbbl %ebx, %ebx
+ movl %ebx, -128(%ebp) # 4-byte Spill
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_1
+# BB#2:
+ movl %esi, -168(%ebp) # 4-byte Spill
+ movl $0, -132(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_3
+.LBB209_1:
+ leal (%esi,%esi), %eax
+ movl %esi, -168(%ebp) # 4-byte Spill
+ movl %eax, -132(%ebp) # 4-byte Spill
+.LBB209_3:
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ movl -180(%ebp), %ebx # 4-byte Reload
+ jb .LBB209_4
+# BB#5:
+ movl $0, -156(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_6
+.LBB209_4:
+ movl -164(%ebp), %eax # 4-byte Reload
+ movl -168(%ebp), %esi # 4-byte Reload
+ shldl $1, %esi, %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+.LBB209_6:
+ movl -176(%ebp), %edi # 4-byte Reload
+ movl -136(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_7
+# BB#8:
+ movl $0, -136(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_9
+.LBB209_7:
+ movl -160(%ebp), %eax # 4-byte Reload
+ movl -164(%ebp), %esi # 4-byte Reload
+ shldl $1, %esi, %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+.LBB209_9:
+ movl %ebx, %esi
+ movl -140(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_10
+# BB#11:
+ movl $0, -140(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_12
+.LBB209_10:
+ movl %edx, %eax
+ movl -160(%ebp), %ebx # 4-byte Reload
+ shldl $1, %ebx, %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+.LBB209_12:
+ movl -144(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_13
+# BB#14:
+ movl $0, -144(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_15
+.LBB209_13:
+ movl %esi, %eax
+ shldl $1, %edx, %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+.LBB209_15:
+ movl -148(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_16
+# BB#17:
+ movl $0, -148(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_18
+.LBB209_16:
+ movl %edi, %eax
+ shldl $1, %esi, %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+.LBB209_18:
+ movl -152(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_19
+# BB#20:
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_21
+.LBB209_19:
+ movl %ecx, %eax
+ shldl $1, %edi, %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+.LBB209_21:
+ movl -168(%ebp), %eax # 4-byte Reload
+ movl %eax, -96(%ebp)
+ movl %eax, -124(%ebp)
+ movl -164(%ebp), %eax # 4-byte Reload
+ movl %eax, -92(%ebp)
+ movl %eax, -120(%ebp)
+ movl -160(%ebp), %eax # 4-byte Reload
+ movl %eax, -88(%ebp)
+ movl %eax, -116(%ebp)
+ movl %edx, -84(%ebp)
+ movl %edx, -112(%ebp)
+ movl %esi, -80(%ebp)
+ movl %esi, -108(%ebp)
+ movl %edi, -76(%ebp)
+ movl %edi, -104(%ebp)
+ movl %ecx, -72(%ebp)
+ movl %ecx, -100(%ebp)
+ movl -184(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_22
+# BB#23:
+ xorl %edi, %edi
+ jmp .LBB209_24
+.LBB209_22:
+ shrl $31, %ecx
+ movl %ecx, %edi
+.LBB209_24:
+ leal -68(%ebp), %eax
+ movl %eax, (%esp)
+ leal -96(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -124(%ebp), %eax
+ movl %eax, 8(%esp)
+ movl -128(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -172(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre7L@PLT
+ movl -132(%ebp), %eax # 4-byte Reload
+ addl -40(%ebp), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl -36(%ebp), %eax
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -32(%ebp), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ movl -140(%ebp), %ecx # 4-byte Reload
+ adcl -28(%ebp), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl -24(%ebp), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -148(%ebp), %ecx # 4-byte Reload
+ adcl -20(%ebp), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -16(%ebp), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ adcl %edi, %esi
+ movl %esi, -128(%ebp) # 4-byte Spill
+ movl -68(%ebp), %ecx
+ movl 8(%ebp), %esi
+ subl (%esi), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ movl -64(%ebp), %edi
+ sbbl 4(%esi), %edi
+ movl -60(%ebp), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, -160(%ebp) # 4-byte Spill
+ movl -56(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, -168(%ebp) # 4-byte Spill
+ movl -52(%ebp), %ebx
+ sbbl 16(%esi), %ebx
+ movl -48(%ebp), %ecx
+ sbbl 20(%esi), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl -44(%ebp), %edx
+ sbbl 24(%esi), %edx
+ movl %edx, -164(%ebp) # 4-byte Spill
+ movl 28(%esi), %edx
+ movl %edx, -176(%ebp) # 4-byte Spill
+ sbbl %edx, -132(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl 36(%esi), %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -188(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %eax
+ movl %eax, -192(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 48(%esi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 52(%esi), %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl -128(%ebp), %ecx # 4-byte Reload
+ sbbl $0, %ecx
+ movl 56(%esi), %eax
+ movl %eax, -228(%ebp) # 4-byte Spill
+ movl -204(%ebp), %edx # 4-byte Reload
+ subl %eax, %edx
+ movl 60(%esi), %eax
+ movl %eax, -232(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl 64(%esi), %eax
+ movl %eax, -236(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -240(%ebp) # 4-byte Spill
+ sbbl %eax, -168(%ebp) # 4-byte Folded Spill
+ movl 72(%esi), %eax
+ movl %eax, -244(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 76(%esi), %eax
+ movl %eax, -248(%ebp) # 4-byte Spill
+ sbbl %eax, -172(%ebp) # 4-byte Folded Spill
+ movl 80(%esi), %eax
+ movl %eax, -252(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 84(%esi), %eax
+ movl %eax, -256(%ebp) # 4-byte Spill
+ sbbl %eax, -132(%ebp) # 4-byte Folded Spill
+ movl 88(%esi), %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ sbbl %eax, -156(%ebp) # 4-byte Folded Spill
+ movl 92(%esi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 96(%esi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 100(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 104(%esi), %eax
+ movl %eax, -220(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 108(%esi), %eax
+ movl %eax, -224(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ sbbl $0, %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ movl %edx, %eax
+ addl -176(%ebp), %eax # 4-byte Folded Reload
+ adcl -180(%ebp), %edi # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -184(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 32(%esi)
+ movl -168(%ebp), %ecx # 4-byte Reload
+ adcl -188(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ adcl -192(%ebp), %ebx # 4-byte Folded Reload
+ movl %ecx, 40(%esi)
+ movl -172(%ebp), %eax # 4-byte Reload
+ adcl -196(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 44(%esi)
+ movl -164(%ebp), %ecx # 4-byte Reload
+ adcl -200(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl -132(%ebp), %eax # 4-byte Reload
+ adcl -228(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl -156(%ebp), %edx # 4-byte Reload
+ adcl -232(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 56(%esi)
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -236(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 60(%esi)
+ movl -140(%ebp), %eax # 4-byte Reload
+ adcl -240(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 64(%esi)
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl -244(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 68(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -248(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 72(%esi)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -252(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 76(%esi)
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -256(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 80(%esi)
+ movl %eax, 84(%esi)
+ movl -204(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 88(%esi)
+ movl -208(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 92(%esi)
+ movl -212(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 96(%esi)
+ movl -216(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 100(%esi)
+ movl -220(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 104(%esi)
+ movl -224(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 108(%esi)
+ addl $268, %esp # imm = 0x10C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end209:
+ .size mcl_fpDbl_sqrPre14L, .Lfunc_end209-mcl_fpDbl_sqrPre14L
+
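+# mcl_fp_mont14L(z, x, y, p): 14-limb (448-bit) Montgomery multiplication,
+# word-serial: for each 32-bit word y[i], x*y[i] is accumulated via
+# .LmulPv448x32, the low limb of the running sum is multiplied by the
+# Montgomery constant stored at -4(p), and m*p is added with a second
+# .LmulPv448x32 call so the bottom limb cancels and the accumulator shifts
+# down one word per iteration.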
+ .globl mcl_fp_mont14L
+ .align 16, 0x90
+ .type mcl_fp_mont14L,@function
+mcl_fp_mont14L: # @mcl_fp_mont14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1900, %esp # imm = 0x76C
+ calll .L210$pb
+.L210$pb:
+ popl %ebx
+.Ltmp41:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp41-.L210$pb), %ebx
+ movl 1932(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1840(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 1840(%esp), %edi
+ movl 1844(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1896(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 1892(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 1888(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 1884(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1880(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1876(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1872(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1868(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1864(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1860(%esp), %esi
+ movl 1856(%esp), %ebp
+ movl 1852(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1848(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1776(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ addl 1776(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1784(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1792(%esp), %ebp
+ adcl 1796(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1804(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1820(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 1928(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1712(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %edi
+ movl %edi, %edx
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 1712(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1716(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1720(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1724(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 1728(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1732(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1736(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1740(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1756(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1768(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1648(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl 100(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1648(%esp), %ebp
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1652(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1656(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1660(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1664(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1668(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1672(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1676(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 1680(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1684(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1688(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1692(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1696(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1700(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 1704(%esp), %esi
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 1928(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1584(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1584(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1588(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1592(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1596(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1600(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1604(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1608(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1616(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1620(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1624(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1628(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1632(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1636(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl 1640(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1520(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1520(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 1544(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1548(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 1564(%esp), %ebp
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1568(%esp), %esi
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 1572(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1576(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1456(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 1456(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1472(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1484(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1488(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1492(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1496(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ adcl 1500(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl 1504(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1512(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1392(%esp), %ecx
+ movl 1932(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ andl $1, %edi
+ movl %edi, %eax
+ addl 1392(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1396(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1400(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1404(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1408(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1412(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1416(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1420(%esp), %esi
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 1424(%esp), %ebp
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1428(%esp), %edi
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1432(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1436(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1440(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1448(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1328(%esp), %ecx
+ movl 1924(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1328(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1352(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1356(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl 1360(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1384(%esp), %edi
+ sbbl %esi, %esi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1264(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 1284(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1312(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1316(%esp), %esi
+ adcl 1320(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1200(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 68(%esp), %eax # 4-byte Reload
+ addl 1200(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1204(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1208(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1212(%esp), %edi
+ adcl 1216(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1224(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1228(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1244(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 1248(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1252(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1256(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %eax, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1136(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1136(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1144(%esp), %ebp
+ adcl 1148(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1172(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 1180(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1072(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 1072(%esp), %eax
+ adcl 1076(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1084(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1088(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1092(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1096(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1100(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1104(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1108(%esp), %ebp
+ adcl 1112(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1116(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1120(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1124(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1128(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %eax, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1008(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1008(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1020(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 1036(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1044(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1052(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 944(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 944(%esp), %eax
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 952(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 956(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 960(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 964(%esp), %esi
+ adcl 968(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 972(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 976(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 980(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 984(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 988(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 992(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 996(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1000(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %eax, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %ebp
+ addl 880(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 896(%esp), %edi
+ adcl 900(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 924(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 816(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 824(%esp), %ebp
+ adcl 828(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 856(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 872(%esp), %esi
+ sbbl %eax, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl 56(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 752(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 760(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 764(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 768(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 772(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 776(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 784(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 788(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 792(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 796(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 800(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 804(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 808(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 688(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 728(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 732(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 740(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 624(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 636(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 648(%esp), %esi
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 652(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 560(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 568(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 580(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl 584(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 592(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 496(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 508(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 520(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 528(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 540(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 432(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 440(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 444(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 452(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 472(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 368(%esp), %ebp
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 376(%esp), %esi
+ adcl 380(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 304(%esp), %ecx
+ adcl 308(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl 312(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 324(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 328(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 240(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 240(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 248(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 252(%esp), %edi
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 256(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 264(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 268(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 104(%esp), %ecx # 4-byte Reload
+ addl 176(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 184(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ adcl 188(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 192(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 200(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %ebp
+ addl 112(%esp), %esi
+ movl 100(%esp), %esi # 4-byte Reload
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 124(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl 128(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 132(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 140(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 144(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 148(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 152(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 156(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 160(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 164(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 168(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 1932(%esp), %ecx
+ subl (%ecx), %eax
+ sbbl 4(%ecx), %edx
+ sbbl 8(%ecx), %esi
+ sbbl 12(%ecx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 20(%ecx), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 24(%ecx), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 60(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ecx), %ebx
+ movl 52(%esp), %edi # 4-byte Reload
+ sbbl 32(%ecx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ sbbl 36(%ecx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 40(%ecx), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ sbbl 44(%ecx), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ sbbl 48(%ecx), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ sbbl 52(%ecx), %edi
+ movl %ebp, %ecx
+ movl %edi, 104(%esp) # 4-byte Spill
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB210_2
+# BB#1:
+ movl %ebx, 60(%esp) # 4-byte Spill
+.LBB210_2:
+ testb %cl, %cl
+ movl 108(%esp), %ebx # 4-byte Reload
+ jne .LBB210_4
+# BB#3:
+ movl %eax, %ebx
+.LBB210_4:
+ movl 1920(%esp), %eax
+ movl %ebx, (%eax)
+ movl 92(%esp), %edi # 4-byte Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB210_6
+# BB#5:
+ movl %edx, %edi
+.LBB210_6:
+ movl %edi, 4(%eax)
+ jne .LBB210_8
+# BB#7:
+ movl %esi, 100(%esp) # 4-byte Spill
+.LBB210_8:
+ movl 100(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%eax)
+ jne .LBB210_10
+# BB#9:
+ movl 16(%esp), %edx # 4-byte Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+.LBB210_10:
+ movl 84(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%eax)
+ jne .LBB210_12
+# BB#11:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB210_12:
+ movl %ecx, 16(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ jne .LBB210_14
+# BB#13:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB210_14:
+ movl %ecx, 20(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB210_16
+# BB#15:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB210_16:
+ movl %ecx, 24(%eax)
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ jne .LBB210_18
+# BB#17:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB210_18:
+ movl %ecx, 32(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB210_20
+# BB#19:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB210_20:
+ movl %ecx, 36(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB210_22
+# BB#21:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB210_22:
+ movl %ecx, 40(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ jne .LBB210_24
+# BB#23:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB210_24:
+ movl %ecx, 44(%eax)
+ movl 88(%esp), %ecx # 4-byte Reload
+ jne .LBB210_26
+# BB#25:
+ movl 48(%esp), %ecx # 4-byte Reload
+.LBB210_26:
+ movl %ecx, 48(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ jne .LBB210_28
+# BB#27:
+ movl 104(%esp), %ecx # 4-byte Reload
+.LBB210_28:
+ movl %ecx, 52(%eax)
+ addl $1900, %esp # imm = 0x76C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end210:
+ .size mcl_fp_mont14L, .Lfunc_end210-mcl_fp_mont14L
+
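+# mcl_fp_montNF14L: Montgomery multiplication for 14 x 32-bit limbs (448 bits),
+# built around the .LmulPv448x32 helper. The "NF" variant selects the reduced
+# result by the sign of t - p (the sarl/js sequence near the end) rather than
+# the borrow-bit test used in mcl_fp_mont14L above.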
+ .globl mcl_fp_montNF14L
+ .align 16, 0x90
+ .type mcl_fp_montNF14L,@function
+mcl_fp_montNF14L: # @mcl_fp_montNF14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1884, %esp # imm = 0x75C
+ calll .L211$pb
+.L211$pb:
+ popl %ebx
+.Ltmp42:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp42-.L211$pb), %ebx
+ movl 1916(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1824(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1824(%esp), %edi
+ movl 1828(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1880(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 1876(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1872(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1868(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1864(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1860(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1856(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1852(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1848(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1844(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1840(%esp), %esi
+ movl 1836(%esp), %ebp
+ movl 1832(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1760(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1760(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1768(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1772(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl 1776(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1784(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1804(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1808(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1816(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1696(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1752(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1696(%esp), %edx
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1700(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1704(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1708(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1712(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1716(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1720(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1724(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1728(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1732(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1736(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1740(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ adcl 1748(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %ebp
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1632(%esp), %ecx
+ movl 1916(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ addl 1632(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1636(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1640(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1644(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1656(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 1664(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1680(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1688(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1568(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1624(%esp), %eax
+ movl 88(%esp), %edx # 4-byte Reload
+ addl 1568(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1572(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1576(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1580(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1584(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1588(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1592(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 1596(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1600(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1604(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1608(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 1612(%esp), %edi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1616(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1620(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %ebp
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1504(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1504(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1512(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1544(%esp), %esi
+ adcl 1548(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1560(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1440(%esp), %ecx
+ movl 1908(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ movl 1496(%esp), %eax
+ movl 72(%esp), %edx # 4-byte Reload
+ addl 1440(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1448(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1452(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1460(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1464(%esp), %edi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1468(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1472(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1476(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1480(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1484(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1488(%esp), %esi
+ adcl 1492(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %ebp
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1376(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1376(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1400(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1424(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1312(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1368(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1312(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1316(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1320(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 1328(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1336(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1340(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1344(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1348(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1352(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1356(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 1360(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1364(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1248(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1248(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 1264(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1272(%esp), %ebp
+ adcl 1276(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1284(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1300(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1184(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1240(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1184(%esp), %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1204(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1216(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1232(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1120(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1120(%esp), %esi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 1124(%esp), %ebp
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1128(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 1156(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1056(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1112(%esp), %eax
+ movl %ebp, %ecx
+ addl 1056(%esp), %ecx
+ adcl 1060(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 1064(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1068(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1072(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1076(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1080(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1084(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 1088(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1092(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 1096(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 1100(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1104(%esp), %ebp
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 1108(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 992(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 1008(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1040(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1044(%esp), %ebp
+ adcl 1048(%esp), %esi
+ movl 1912(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 984(%esp), %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 932(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 936(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 940(%esp), %edi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 944(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 948(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 952(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 956(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 960(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 964(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 968(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 972(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 976(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 980(%esp), %esi
+ movl %esi, %ebp
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 864(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 864(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 876(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 884(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 916(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 920(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 800(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 856(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 800(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 808(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 816(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 828(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 852(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 736(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 736(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 764(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 772(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 780(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 728(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 672(%esp), %ecx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 700(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl 704(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 712(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 608(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 616(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 624(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 644(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 600(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 544(%esp), %ecx
+ adcl 548(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 556(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 568(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 576(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 480(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 488(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 496(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 504(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 532(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 472(%esp), %edx
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 416(%esp), %ecx
+ adcl 420(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 424(%esp), %edi
+ adcl 428(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 464(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 352(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 360(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 364(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 388(%esp), %edi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 288(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 344(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 288(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 296(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 320(%esp), %edi
+ adcl 324(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 328(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 224(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 232(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 256(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 260(%esp), %edi
+ adcl 264(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 216(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 160(%esp), %ecx
+ adcl 164(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 168(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 192(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 96(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 96(%esp), %esi
+ movl 64(%esp), %esi # 4-byte Reload
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 104(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ adcl 108(%esp), %esi
+ adcl 112(%esp), %edi
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 132(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 148(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 152(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1916(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %ebx
+ movl %esi, %eax
+ sbbl 8(%ebp), %eax
+ movl %edi, %ecx
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 40(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 44(%ebp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 48(%ebp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 52(%ebp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ movl 92(%esp), %ebp # 4-byte Reload
+ js .LBB211_2
+# BB#1:
+ movl %edx, %ebp
+.LBB211_2:
+ movl 1904(%esp), %edx
+ movl %ebp, (%edx)
+ movl 88(%esp), %ebp # 4-byte Reload
+ js .LBB211_4
+# BB#3:
+ movl %ebx, %ebp
+.LBB211_4:
+ movl %ebp, 4(%edx)
+ js .LBB211_6
+# BB#5:
+ movl %eax, %esi
+.LBB211_6:
+ movl %esi, 8(%edx)
+ js .LBB211_8
+# BB#7:
+ movl 4(%esp), %edi # 4-byte Reload
+.LBB211_8:
+ movl %edi, 12(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ js .LBB211_10
+# BB#9:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB211_10:
+ movl %eax, 16(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB211_12
+# BB#11:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB211_12:
+ movl %eax, 20(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB211_14
+# BB#13:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB211_14:
+ movl %eax, 24(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB211_16
+# BB#15:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB211_16:
+ movl %eax, 28(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB211_18
+# BB#17:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB211_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB211_20
+# BB#19:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB211_20:
+ movl %eax, 36(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB211_22
+# BB#21:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB211_22:
+ movl %eax, 40(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB211_24
+# BB#23:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB211_24:
+ movl %eax, 44(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB211_26
+# BB#25:
+ movl 64(%esp), %eax # 4-byte Reload
+.LBB211_26:
+ movl %eax, 48(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB211_28
+# BB#27:
+ movl 72(%esp), %eax # 4-byte Reload
+.LBB211_28:
+ movl %eax, 52(%edx)
+ addl $1884, %esp # imm = 0x75C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end211:
+ .size mcl_fp_montNF14L, .Lfunc_end211-mcl_fp_montNF14L
+
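+# mcl_fp_montRed14L: Montgomery reduction of a double-width (28-limb, 896-bit)
+# input down to 14 x 32-bit limbs modulo the 448-bit modulus; each pass
+# multiplies the current low limb by the word stored at -4 of the modulus
+# pointer (the Montgomery n' constant) and folds in a .LmulPv448x32 product.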
+ .globl mcl_fp_montRed14L
+ .align 16, 0x90
+ .type mcl_fp_montRed14L,@function
+mcl_fp_montRed14L: # @mcl_fp_montRed14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1068, %esp # imm = 0x42C
+ calll .L212$pb
+.L212$pb:
+ popl %eax
+.Ltmp43:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp43-.L212$pb), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1096(%esp), %edx
+ movl -4(%edx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1092(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 92(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ imull %eax, %ebx
+ movl 108(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 100(%ecx), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 92(%ecx), %esi
+ movl %esi, 140(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 84(%ecx), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 80(%ecx), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ movl 68(%ecx), %esi
+ movl %esi, 168(%esp) # 4-byte Spill
+ movl 64(%ecx), %esi
+ movl %esi, 164(%esp) # 4-byte Spill
+ movl 60(%ecx), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 52(%ecx), %edi
+ movl %edi, 144(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 40(%ecx), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 36(%ecx), %ebp
+ movl 32(%ecx), %edi
+ movl 28(%ecx), %esi
+ movl 24(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 12(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 8(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 52(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 1008(%esp), %ecx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ movl 92(%esp), %eax # 4-byte Reload
+ addl 1008(%esp), %eax
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1012(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1036(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1040(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ adcl 1044(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 1052(%esp), %ebp
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 944(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 944(%esp), %esi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 948(%esp), %edx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 976(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 984(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %esi # 4-byte Reload
+ adcl 1000(%esp), %esi
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %ebp
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 880(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 884(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 908(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 920(%esp), %edi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl 932(%esp), %esi
+ movl %esi, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ movl 152(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 816(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 820(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 152(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 752(%esp), %ebp
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ movl 156(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 688(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 156(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 140(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 624(%esp), %ebp
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 628(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %ebp # 4-byte Reload
+ adcl 664(%esp), %ebp
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 140(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 560(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 564(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ adcl 596(%esp), %ebp
+ movl %ebp, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %edi # 4-byte Reload
+ adcl 600(%esp), %edi
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1096(%esp), %eax
+ movl %eax, %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 496(%esp), %ebp
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %ebp # 4-byte Reload
+ adcl 516(%esp), %ebp
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ adcl 532(%esp), %edi
+ movl %edi, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 536(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 432(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 440(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ adcl 448(%esp), %ebp
+ movl %ebp, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %ecx # 4-byte Reload
+ adcl 452(%esp), %ecx
+ movl %ecx, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %ebp # 4-byte Reload
+ adcl 456(%esp), %ebp
+ movl 164(%esp), %ecx # 4-byte Reload
+ adcl 460(%esp), %ecx
+ movl %ecx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %ecx # 4-byte Reload
+ adcl 464(%esp), %ecx
+ movl %ecx, 168(%esp) # 4-byte Spill
+ adcl 468(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 484(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %eax, %esi
+ movl 88(%esp), %edi # 4-byte Reload
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 368(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 376(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 380(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %esi # 4-byte Reload
+ adcl 384(%esp), %esi
+ adcl 388(%esp), %ebp
+ movl %ebp, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %ecx # 4-byte Reload
+ adcl 392(%esp), %ecx
+ movl %ecx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %ecx # 4-byte Reload
+ adcl 396(%esp), %ecx
+ movl %ecx, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 400(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 404(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 408(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 412(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 416(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 424(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %eax, %ebp
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 304(%esp), %ebp
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 308(%esp), %edi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 316(%esp), %ebp
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %edi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 240(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 240(%esp), %edi
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 244(%esp), %ecx
+ adcl 248(%esp), %ebp
+ movl %ebp, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl 264(%esp), %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %edi # 4-byte Reload
+ adcl 268(%esp), %edi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 280(%esp), %ebp
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 176(%esp), %esi
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %ebx # 4-byte Reload
+ adcl 188(%esp), %ebx
+ movl %ebx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %edx # 4-byte Reload
+ adcl 196(%esp), %edx
+ movl %edx, 136(%esp) # 4-byte Spill
+ movl %edi, %eax
+ adcl 200(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl 212(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 232(%esp), %ecx
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl 172(%esp), %edi # 4-byte Reload
+ subl 16(%esp), %edi # 4-byte Folded Reload
+ movl 160(%esp), %ebp # 4-byte Reload
+ sbbl 8(%esp), %ebp # 4-byte Folded Reload
+ sbbl 12(%esp), %ebx # 4-byte Folded Reload
+ movl 168(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ sbbl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 148(%esp), %edx # 4-byte Reload
+ sbbl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 156(%esp), %edx # 4-byte Reload
+ sbbl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 152(%esp), %edx # 4-byte Reload
+ sbbl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 124(%esp), %edx # 4-byte Reload
+ sbbl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 140(%esp), %edx # 4-byte Reload
+ sbbl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 128(%esp), %edx # 4-byte Reload
+ sbbl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %edx # 4-byte Reload
+ sbbl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ sbbl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 132(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ sbbl 60(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 144(%esp) # 4-byte Spill
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB212_2
+# BB#1:
+ movl %eax, 168(%esp) # 4-byte Spill
+.LBB212_2:
+ movl %esi, %edx
+ testb %dl, %dl
+ movl 172(%esp), %eax # 4-byte Reload
+ jne .LBB212_4
+# BB#3:
+ movl %edi, %eax
+.LBB212_4:
+ movl 1088(%esp), %edi
+ movl %eax, (%edi)
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ movl 160(%esp), %ecx # 4-byte Reload
+ jne .LBB212_6
+# BB#5:
+ movl %ebp, %ecx
+.LBB212_6:
+ movl %ecx, 4(%edi)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl 164(%esp), %ebp # 4-byte Reload
+ jne .LBB212_8
+# BB#7:
+ movl %ebx, %ebp
+.LBB212_8:
+ movl %ebp, 8(%edi)
+ movl 168(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%edi)
+ movl 124(%esp), %ebp # 4-byte Reload
+ movl 136(%esp), %ebx # 4-byte Reload
+ jne .LBB212_10
+# BB#9:
+ movl 80(%esp), %ebx # 4-byte Reload
+.LBB212_10:
+ movl %ebx, 16(%edi)
+ movl 140(%esp), %ebx # 4-byte Reload
+ movl 148(%esp), %esi # 4-byte Reload
+ jne .LBB212_12
+# BB#11:
+ movl 84(%esp), %esi # 4-byte Reload
+.LBB212_12:
+ movl %esi, 20(%edi)
+ movl 128(%esp), %esi # 4-byte Reload
+ jne .LBB212_14
+# BB#13:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB212_14:
+ movl %eax, 24(%edi)
+ movl 120(%esp), %edx # 4-byte Reload
+ jne .LBB212_16
+# BB#15:
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, 152(%esp) # 4-byte Spill
+.LBB212_16:
+ movl 152(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%edi)
+ jne .LBB212_18
+# BB#17:
+ movl 96(%esp), %ebp # 4-byte Reload
+.LBB212_18:
+ movl %ebp, 32(%edi)
+ jne .LBB212_20
+# BB#19:
+ movl 100(%esp), %ebx # 4-byte Reload
+.LBB212_20:
+ movl %ebx, 36(%edi)
+ jne .LBB212_22
+# BB#21:
+ movl 112(%esp), %esi # 4-byte Reload
+.LBB212_22:
+ movl %esi, 40(%edi)
+ jne .LBB212_24
+# BB#23:
+ movl 116(%esp), %edx # 4-byte Reload
+.LBB212_24:
+ movl %edx, 44(%edi)
+ jne .LBB212_26
+# BB#25:
+ movl 132(%esp), %ecx # 4-byte Reload
+.LBB212_26:
+ movl %ecx, 48(%edi)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB212_28
+# BB#27:
+ movl 144(%esp), %eax # 4-byte Reload
+.LBB212_28:
+ movl %eax, 52(%edi)
+ addl $1068, %esp # imm = 0x42C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end212:
+ .size mcl_fp_montRed14L, .Lfunc_end212-mcl_fp_montRed14L
+
+ .globl mcl_fp_addPre14L
+ .align 16, 0x90
+ .type mcl_fp_addPre14L,@function
+mcl_fp_addPre14L: # @mcl_fp_addPre14L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl 40(%eax), %edi
+ movl %esi, 32(%ebx)
+ movl 40(%ecx), %esi
+ adcl %edi, %esi
+ movl 44(%eax), %edi
+ movl %edx, 36(%ebx)
+ movl 44(%ecx), %edx
+ adcl %edi, %edx
+ movl 48(%eax), %edi
+ movl %esi, 40(%ebx)
+ movl 48(%ecx), %esi
+ adcl %edi, %esi
+ movl %edx, 44(%ebx)
+ movl %esi, 48(%ebx)
+ movl 52(%eax), %eax
+ movl 52(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 52(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end213:
+ .size mcl_fp_addPre14L, .Lfunc_end213-mcl_fp_addPre14L
+
+ .globl mcl_fp_subPre14L
+ .align 16, 0x90
+ .type mcl_fp_subPre14L,@function
+mcl_fp_subPre14L: # @mcl_fp_subPre14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %edi, 32(%ebp)
+ movl 40(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 44(%edx), %ebx
+ movl %esi, 36(%ebp)
+ movl 44(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 48(%edx), %ebx
+ movl %edi, 40(%ebp)
+ movl 48(%ecx), %edi
+ sbbl %ebx, %edi
+ movl %esi, 44(%ebp)
+ movl %edi, 48(%ebp)
+ movl 52(%edx), %edx
+ movl 52(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 52(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end214:
+ .size mcl_fp_subPre14L, .Lfunc_end214-mcl_fp_subPre14L
+
+ .globl mcl_fp_shr1_14L
+ .align 16, 0x90
+ .type mcl_fp_shr1_14L,@function
+mcl_fp_shr1_14L: # @mcl_fp_shr1_14L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 48(%ecx)
+ shrl %eax
+ movl %eax, 52(%ecx)
+ popl %esi
+ retl
+.Lfunc_end215:
+ .size mcl_fp_shr1_14L, .Lfunc_end215-mcl_fp_shr1_14L
+
+ .globl mcl_fp_add14L
+ .align 16, 0x90
+ .type mcl_fp_add14L,@function
+mcl_fp_add14L: # @mcl_fp_add14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl 72(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 68(%esp), %ebp
+ addl (%ebp), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 4(%ebp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 8(%eax), %ecx
+ adcl 8(%ebp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 12(%ebp), %edx
+ movl 16(%ebp), %ecx
+ adcl 12(%eax), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 16(%eax), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%ebp), %ecx
+ adcl 20(%eax), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 24(%ebp), %ecx
+ adcl 24(%eax), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 28(%ebp), %ecx
+ adcl 28(%eax), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 32(%ebp), %ecx
+ adcl 32(%eax), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 36(%ebp), %ecx
+ adcl 36(%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 40(%ebp), %edx
+ adcl 40(%eax), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 44(%ebp), %ebx
+ adcl 44(%eax), %ebx
+ movl 48(%ebp), %esi
+ adcl 48(%eax), %esi
+ movl 52(%ebp), %edi
+ adcl 52(%eax), %edi
+ movl 64(%esp), %eax
+ movl 4(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl %edx, 40(%eax)
+ movl %ebx, 44(%eax)
+ movl %esi, 48(%eax)
+ movl %edi, 52(%eax)
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 76(%esp), %edx
+ subl (%edx), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ sbbl 4(%edx), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ sbbl 8(%edx), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ sbbl 12(%edx), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ sbbl 16(%edx), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %ebp # 4-byte Reload
+ sbbl 20(%edx), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %ebp # 4-byte Reload
+ sbbl 24(%edx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %ebp # 4-byte Reload
+ sbbl 28(%edx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ sbbl 32(%edx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebp # 4-byte Reload
+ sbbl 36(%edx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl (%esp), %ebp # 4-byte Reload
+ sbbl 40(%edx), %ebp
+ sbbl 44(%edx), %ebx
+ sbbl 48(%edx), %esi
+ sbbl 52(%edx), %edi
+ sbbl $0, %ecx
+ testb $1, %cl
+ jne .LBB216_2
+# BB#1: # %nocarry
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, (%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl %ebp, 40(%eax)
+ movl %ebx, 44(%eax)
+ movl %esi, 48(%eax)
+ movl %edi, 52(%eax)
+.LBB216_2: # %carry
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end216:
+ .size mcl_fp_add14L, .Lfunc_end216-mcl_fp_add14L
+
+ .globl mcl_fp_addNF14L
+ .align 16, 0x90
+ .type mcl_fp_addNF14L,@function
+mcl_fp_addNF14L: # @mcl_fp_addNF14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $112, %esp
+ movl 140(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 136(%esp), %ecx
+ addl (%ecx), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 4(%ecx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 52(%eax), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 48(%eax), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 44(%eax), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 40(%eax), %ebp
+ movl 36(%eax), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 32(%eax), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 28(%eax), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 24(%eax), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 20(%eax), %ebx
+ movl 16(%eax), %edi
+ movl 12(%eax), %esi
+ movl 8(%eax), %edx
+ adcl 8(%ecx), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 12(%ecx), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 16(%ecx), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 20(%ecx), %ebx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 24(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 28(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 32(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 36(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 40(%ecx), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 44(%ecx), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 48(%ecx), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 52(%ecx), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ subl (%ecx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 4(%ecx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 8(%ecx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ movl %edx, %eax
+ sbbl 24(%ecx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 28(%ecx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 32(%ecx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ sbbl 36(%ecx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ sbbl 40(%ecx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ movl %eax, %esi
+ movl %eax, %ebp
+ sbbl 44(%ecx), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, %esi
+ sbbl 48(%ecx), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, %edi
+ sbbl 52(%ecx), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl %edi, %ecx
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ movl 72(%esp), %ecx # 4-byte Reload
+ js .LBB217_2
+# BB#1:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB217_2:
+ movl 132(%esp), %edi
+ movl %ecx, (%edi)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB217_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB217_4:
+ movl %eax, 4(%edi)
+ movl %edx, %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ js .LBB217_6
+# BB#5:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB217_6:
+ movl %edx, 8(%edi)
+ movl %ebp, %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ js .LBB217_8
+# BB#7:
+ movl 12(%esp), %ebp # 4-byte Reload
+.LBB217_8:
+ movl %ebp, 12(%edi)
+ movl 100(%esp), %ebp # 4-byte Reload
+ js .LBB217_10
+# BB#9:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB217_10:
+ movl %eax, 16(%edi)
+ movl 80(%esp), %esi # 4-byte Reload
+ js .LBB217_12
+# BB#11:
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+.LBB217_12:
+ movl 68(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%edi)
+ js .LBB217_14
+# BB#13:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB217_14:
+ movl %ecx, 24(%edi)
+ js .LBB217_16
+# BB#15:
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+.LBB217_16:
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%edi)
+ js .LBB217_18
+# BB#17:
+ movl 32(%esp), %ebp # 4-byte Reload
+.LBB217_18:
+ movl %ebp, 32(%edi)
+ js .LBB217_20
+# BB#19:
+ movl 36(%esp), %ebx # 4-byte Reload
+.LBB217_20:
+ movl %ebx, 36(%edi)
+ js .LBB217_22
+# BB#21:
+ movl 40(%esp), %esi # 4-byte Reload
+.LBB217_22:
+ movl %esi, 40(%edi)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB217_24
+# BB#23:
+ movl 44(%esp), %edx # 4-byte Reload
+.LBB217_24:
+ movl %edx, 44(%edi)
+ movl 92(%esp), %ecx # 4-byte Reload
+ js .LBB217_26
+# BB#25:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB217_26:
+ movl %eax, 48(%edi)
+ js .LBB217_28
+# BB#27:
+ movl 52(%esp), %ecx # 4-byte Reload
+.LBB217_28:
+ movl %ecx, 52(%edi)
+ addl $112, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end217:
+ .size mcl_fp_addNF14L, .Lfunc_end217-mcl_fp_addNF14L
+
+ .globl mcl_fp_sub14L
+ .align 16, 0x90
+ .type mcl_fp_sub14L,@function
+mcl_fp_sub14L: # @mcl_fp_sub14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl 76(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 80(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ sbbl 32(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 36(%esi), %edx
+ sbbl 36(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 40(%esi), %ecx
+ sbbl 40(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ sbbl 44(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 48(%esi), %ebp
+ sbbl 48(%edi), %ebp
+ movl 52(%esi), %esi
+ sbbl 52(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 72(%esp), %ebx
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%ebx)
+ movl %edx, 36(%ebx)
+ movl %ecx, 40(%ebx)
+ movl %eax, 44(%ebx)
+ movl %ebp, 48(%ebx)
+ movl %esi, 52(%ebx)
+ je .LBB218_2
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 84(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 8(%esi), %edi
+ movl 12(%esi), %eax
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl 44(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%ebx)
+ movl %eax, 44(%ebx)
+ movl 48(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 48(%ebx)
+ movl 52(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ebx)
+.LBB218_2: # %nocarry
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end218:
+ .size mcl_fp_sub14L, .Lfunc_end218-mcl_fp_sub14L
+
+ .globl mcl_fp_subNF14L
+ .align 16, 0x90
+ .type mcl_fp_subNF14L,@function
+mcl_fp_subNF14L: # @mcl_fp_subNF14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 112(%esp), %ecx
+ movl 52(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ movl 116(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 32(%ecx), %ebp
+ movl 28(%ecx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ sbbl 32(%edi), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 52(%edi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %eax, %esi
+ sarl $31, %esi
+ movl %esi, %ecx
+ addl %ecx, %ecx
+ movl %esi, %ebp
+ adcl %ebp, %ebp
+ shrl $31, %eax
+ orl %ecx, %eax
+ movl 120(%esp), %edi
+ andl 4(%edi), %ebp
+ andl (%edi), %eax
+ movl 52(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 48(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 40(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 36(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 32(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 28(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 24(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 20(%edi), %ebx
+ andl %esi, %ebx
+ movl 16(%edi), %edx
+ andl %esi, %edx
+ movl 12(%edi), %ecx
+ andl %esi, %ecx
+ andl 8(%edi), %esi
+ addl 56(%esp), %eax # 4-byte Folded Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl 108(%esp), %edi
+ movl %eax, (%edi)
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %ebp, 4(%edi)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %esi, 8(%edi)
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 12(%edi)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, 16(%edi)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%edi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 24(%edi)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 28(%edi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%edi)
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%edi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 44(%edi)
+ movl %eax, 48(%edi)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%edi)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end219:
+ .size mcl_fp_subNF14L, .Lfunc_end219-mcl_fp_subNF14L
+
+ .globl mcl_fpDbl_add14L
+ .align 16, 0x90
+ .type mcl_fpDbl_add14L,@function
+mcl_fpDbl_add14L: # @mcl_fpDbl_add14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 128(%esp), %ecx
+ movl 124(%esp), %esi
+ movl 12(%esi), %edi
+ movl 16(%esi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%esi), %ebp
+ movl 120(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%esi), %ebp
+ adcl 8(%esi), %ebx
+ adcl 12(%ecx), %edi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 64(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %edi, 12(%eax)
+ movl 20(%esi), %edi
+ adcl %ebx, %edi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%esi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %edi, 20(%eax)
+ movl 28(%esi), %edi
+ adcl %ebx, %edi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%esi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %edi, 28(%eax)
+ movl 36(%esi), %edi
+ adcl %ebx, %edi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%esi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %edi, 36(%eax)
+ movl 44(%esi), %edi
+ adcl %ebx, %edi
+ movl 48(%ecx), %ebx
+ movl %edx, 40(%eax)
+ movl 48(%esi), %edx
+ adcl %ebx, %edx
+ movl 52(%ecx), %ebx
+ movl %edi, 44(%eax)
+ movl 52(%esi), %edi
+ adcl %ebx, %edi
+ movl 56(%ecx), %ebx
+ movl %edx, 48(%eax)
+ movl 56(%esi), %edx
+ adcl %ebx, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 60(%ecx), %edx
+ movl %edi, 52(%eax)
+ movl 60(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 64(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%ecx), %edx
+ movl 68(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%ecx), %edx
+ movl 72(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 76(%ecx), %edx
+ movl 76(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%ecx), %edx
+ movl 80(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%ecx), %edx
+ movl 84(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 88(%ecx), %edx
+ movl 88(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 92(%ecx), %edx
+ movl 92(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 96(%ecx), %edx
+ movl 96(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 100(%ecx), %edx
+ movl 100(%esi), %edi
+ adcl %edx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 104(%ecx), %edx
+ movl 104(%esi), %ebx
+ adcl %edx, %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 108(%ecx), %ecx
+ movl 108(%esi), %esi
+ adcl %ecx, %esi
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 132(%esp), %ebp
+ movl 72(%esp), %ecx # 4-byte Reload
+ subl (%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ sbbl 40(%ebp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 44(%ebp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl %esi, %ebx
+ sbbl 48(%ebp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl 52(%ebp), %esi
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB220_2
+# BB#1:
+ movl %esi, %ebx
+.LBB220_2:
+ testb %dl, %dl
+ movl 72(%esp), %eax # 4-byte Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB220_4
+# BB#3:
+ movl %ecx, %edx
+ movl (%esp), %edi # 4-byte Reload
+ movl 4(%esp), %ebp # 4-byte Reload
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB220_4:
+ movl 120(%esp), %esi
+ movl %eax, 56(%esi)
+ movl 76(%esp), %eax # 4-byte Reload
+ movl %eax, 60(%esi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, 64(%esi)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl %eax, 68(%esi)
+ movl 88(%esp), %eax # 4-byte Reload
+ movl %eax, 72(%esi)
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esi)
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esi)
+ movl %ebp, 84(%esi)
+ movl %edi, 88(%esi)
+ movl %edx, 92(%esi)
+ movl 52(%esp), %edx # 4-byte Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ jne .LBB220_6
+# BB#5:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB220_6:
+ movl %eax, 96(%esi)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB220_8
+# BB#7:
+ movl 40(%esp), %edx # 4-byte Reload
+.LBB220_8:
+ movl %edx, 100(%esi)
+ jne .LBB220_10
+# BB#9:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB220_10:
+ movl %ecx, 104(%esi)
+ movl %ebx, 108(%esi)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end220:
+ .size mcl_fpDbl_add14L, .Lfunc_end220-mcl_fpDbl_add14L
+
+ .globl mcl_fpDbl_sub14L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub14L,@function
+mcl_fpDbl_sub14L: # @mcl_fpDbl_sub14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $96, %esp
+ movl 120(%esp), %ebx
+ movl (%ebx), %eax
+ movl 4(%ebx), %edx
+ movl 124(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %edx
+ movl 8(%ebx), %esi
+ sbbl 8(%ebp), %esi
+ movl 116(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%ebx), %eax
+ sbbl 12(%ebp), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%ebx), %edx
+ sbbl 16(%ebp), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebp), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%ebx), %eax
+ sbbl %esi, %eax
+ movl 24(%ebp), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%ebx), %edx
+ sbbl %esi, %edx
+ movl 28(%ebp), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%ebx), %eax
+ sbbl %esi, %eax
+ movl 32(%ebp), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%ebx), %edx
+ sbbl %esi, %edx
+ movl 36(%ebp), %esi
+ movl %eax, 28(%ecx)
+ movl 36(%ebx), %eax
+ sbbl %esi, %eax
+ movl 40(%ebp), %esi
+ movl %edx, 32(%ecx)
+ movl 40(%ebx), %edx
+ sbbl %esi, %edx
+ movl 44(%ebp), %esi
+ movl %eax, 36(%ecx)
+ movl 44(%ebx), %eax
+ sbbl %esi, %eax
+ movl 48(%ebp), %esi
+ movl %edx, 40(%ecx)
+ movl 48(%ebx), %edx
+ sbbl %esi, %edx
+ movl 52(%ebp), %esi
+ movl %eax, 44(%ecx)
+ movl 52(%ebx), %eax
+ sbbl %esi, %eax
+ movl 56(%ebp), %esi
+ movl %edx, 48(%ecx)
+ movl 56(%ebx), %edx
+ sbbl %esi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 60(%ebp), %edx
+ movl %eax, 52(%ecx)
+ movl 60(%ebx), %eax
+ sbbl %edx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 64(%ebp), %eax
+ movl 64(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 68(%ebp), %eax
+ movl 68(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 72(%ebp), %eax
+ movl 72(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 76(%ebp), %eax
+ movl 76(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 80(%ebp), %eax
+ movl 80(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 84(%ebp), %eax
+ movl 84(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 88(%ebp), %eax
+ movl 88(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 92(%ebp), %eax
+ movl 92(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 96(%ebp), %eax
+ movl 96(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 100(%ebp), %eax
+ movl 100(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 104(%ebp), %eax
+ movl 104(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 108(%ebp), %eax
+ movl 108(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 128(%esp), %ebp
+ jne .LBB221_1
+# BB#2:
+ movl $0, 56(%esp) # 4-byte Folded Spill
+ jmp .LBB221_3
+.LBB221_1:
+ movl 52(%ebp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+.LBB221_3:
+ testb %al, %al
+ jne .LBB221_4
+# BB#5:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB221_6
+.LBB221_4:
+ movl (%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 4(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB221_6:
+ jne .LBB221_7
+# BB#8:
+ movl $0, 32(%esp) # 4-byte Folded Spill
+ jmp .LBB221_9
+.LBB221_7:
+ movl 48(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+.LBB221_9:
+ jne .LBB221_10
+# BB#11:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ jmp .LBB221_12
+.LBB221_10:
+ movl 44(%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+.LBB221_12:
+ jne .LBB221_13
+# BB#14:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB221_15
+.LBB221_13:
+ movl 40(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB221_15:
+ jne .LBB221_16
+# BB#17:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB221_18
+.LBB221_16:
+ movl 36(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB221_18:
+ jne .LBB221_19
+# BB#20:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB221_21
+.LBB221_19:
+ movl 32(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB221_21:
+ jne .LBB221_22
+# BB#23:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB221_24
+.LBB221_22:
+ movl 28(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB221_24:
+ jne .LBB221_25
+# BB#26:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB221_27
+.LBB221_25:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB221_27:
+ jne .LBB221_28
+# BB#29:
+ movl $0, %esi
+ jmp .LBB221_30
+.LBB221_28:
+ movl 20(%ebp), %esi
+.LBB221_30:
+ jne .LBB221_31
+# BB#32:
+ movl $0, %edi
+ jmp .LBB221_33
+.LBB221_31:
+ movl 16(%ebp), %edi
+.LBB221_33:
+ jne .LBB221_34
+# BB#35:
+ movl $0, %ebx
+ jmp .LBB221_36
+.LBB221_34:
+ movl 12(%ebp), %ebx
+.LBB221_36:
+ jne .LBB221_37
+# BB#38:
+ xorl %ebp, %ebp
+ jmp .LBB221_39
+.LBB221_37:
+ movl 8(%ebp), %ebp
+.LBB221_39:
+ movl 20(%esp), %edx # 4-byte Reload
+ addl 44(%esp), %edx # 4-byte Folded Reload
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 56(%ecx)
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 60(%ecx)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 64(%ecx)
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 68(%ecx)
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 72(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 76(%ecx)
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 80(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 84(%ecx)
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 88(%ecx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 96(%ecx)
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 100(%ecx)
+ movl %eax, 104(%ecx)
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%ecx)
+ addl $96, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end221:
+ .size mcl_fpDbl_sub14L, .Lfunc_end221-mcl_fpDbl_sub14L
+
+ .align 16, 0x90
+ .type .LmulPv480x32,@function
+.LmulPv480x32: # @mulPv480x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $104, %esp
+ movl %edx, %ebp
+ movl 124(%esp), %esi
+ movl %esi, %eax
+ mull 56(%ebp)
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 52(%ebp)
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 48(%ebp)
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 44(%ebp)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 40(%ebp)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 36(%ebp)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 32(%ebp)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 28(%ebp)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 24(%ebp)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 20(%ebp)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 16(%ebp)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 12(%ebp)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 8(%ebp)
+ movl %edx, %edi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 4(%ebp)
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull (%ebp)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%ecx)
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%ecx)
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 60(%ecx)
+ movl %ecx, %eax
+ addl $104, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end222:
+ .size .LmulPv480x32, .Lfunc_end222-.LmulPv480x32
+
+ .globl mcl_fp_mulUnitPre15L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre15L,@function
+mcl_fp_mulUnitPre15L: # @mcl_fp_mulUnitPre15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $124, %esp
+ calll .L223$pb
+.L223$pb:
+ popl %ebx
+.Ltmp44:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp44-.L223$pb), %ebx
+ movl 152(%esp), %eax
+ movl %eax, (%esp)
+ leal 56(%esp), %ecx
+ movl 148(%esp), %edx
+ calll .LmulPv480x32
+ movl 116(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 108(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 104(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp
+ movl 72(%esp), %ebx
+ movl 68(%esp), %edi
+ movl 64(%esp), %esi
+ movl 56(%esp), %edx
+ movl 60(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ addl $124, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end223:
+ .size mcl_fp_mulUnitPre15L, .Lfunc_end223-mcl_fp_mulUnitPre15L
+
+ .globl mcl_fpDbl_mulPre15L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre15L,@function
+mcl_fpDbl_mulPre15L: # @mcl_fpDbl_mulPre15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1084, %esp # imm = 0x43C
+ calll .L224$pb
+.L224$pb:
+ popl %esi
+.Ltmp45:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp45-.L224$pb), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 1112(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl 1108(%esp), %edi
+ movl %edi, %edx
+ movl %esi, %ebx
+ calll .LmulPv480x32
+ movl 1076(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1072(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1068(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1060(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1056(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1052(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1044(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1040(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1036(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1032(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1028(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 1024(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1016(%esp), %eax
+ movl 1020(%esp), %ebp
+ movl 1104(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 1112(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl %edi, %edx
+ movl %esi, %ebx
+ calll .LmulPv480x32
+ addl 952(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 1012(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1008(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1004(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1000(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 996(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 992(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 988(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 984(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 980(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 972(%esp), %edi
+ movl 968(%esp), %esi
+ movl 964(%esp), %edx
+ movl 956(%esp), %eax
+ movl 960(%esp), %ecx
+ movl 1104(%esp), %ebp
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 72(%esp), %eax # 4-byte Reload
+ addl 888(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 948(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 944(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 940(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 936(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 932(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 928(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 924(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 916(%esp), %ebx
+ movl 912(%esp), %edi
+ movl 908(%esp), %esi
+ movl 904(%esp), %edx
+ movl 900(%esp), %ecx
+ movl 892(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 896(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 72(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 112(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 824(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 884(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 880(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 876(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 872(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 868(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 864(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 856(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 852(%esp), %ebx
+ movl 848(%esp), %edi
+ movl 844(%esp), %esi
+ movl 840(%esp), %edx
+ movl 836(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 760(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 788(%esp), %ebx
+ movl 784(%esp), %edi
+ movl 780(%esp), %esi
+ movl 776(%esp), %edx
+ movl 772(%esp), %ecx
+ movl 764(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 748(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 744(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 740(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 732(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 724(%esp), %ebx
+ movl 720(%esp), %edi
+ movl 716(%esp), %esi
+ movl 712(%esp), %edx
+ movl 708(%esp), %ecx
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 632(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 688(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 680(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 676(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 672(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 660(%esp), %ebx
+ movl 656(%esp), %edi
+ movl 652(%esp), %esi
+ movl 648(%esp), %edx
+ movl 644(%esp), %ecx
+ movl 636(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 568(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 628(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 624(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 620(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 616(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 596(%esp), %ebx
+ movl 592(%esp), %edi
+ movl 588(%esp), %esi
+ movl 584(%esp), %edx
+ movl 580(%esp), %ecx
+ movl 572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 504(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 532(%esp), %ebx
+ movl 528(%esp), %edi
+ movl 524(%esp), %esi
+ movl 520(%esp), %edx
+ movl 516(%esp), %ecx
+ movl 508(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 440(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 468(%esp), %ebx
+ movl 464(%esp), %edi
+ movl 460(%esp), %esi
+ movl 456(%esp), %edx
+ movl 452(%esp), %ecx
+ movl 444(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 376(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 404(%esp), %ebx
+ movl 400(%esp), %edi
+ movl 396(%esp), %esi
+ movl 392(%esp), %edx
+ movl 388(%esp), %ecx
+ movl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 312(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 348(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 340(%esp), %ebx
+ movl 336(%esp), %edi
+ movl 332(%esp), %esi
+ movl 328(%esp), %edx
+ movl 324(%esp), %ecx
+ movl 316(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%eax)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl %eax, 108(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 1108(%esp), %eax
+ movl %eax, %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 108(%esp), %eax # 4-byte Reload
+ addl 248(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 288(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 284(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 276(%esp), %ebx
+ movl 272(%esp), %edi
+ movl 268(%esp), %edx
+ movl 264(%esp), %ecx
+ movl 260(%esp), %eax
+ movl 252(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 256(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ movl 1104(%esp), %ebp
+ movl %esi, 48(%ebp)
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 20(%esp), %esi # 4-byte Reload
+ adcl %esi, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 244(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 240(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 236(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 232(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 220(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 212(%esp), %ebx
+ movl 208(%esp), %edx
+ movl 204(%esp), %ecx
+ movl 200(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 188(%esp), %eax
+ movl 192(%esp), %esi
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl 1104(%esp), %edi
+ movl %ebp, 52(%edi)
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl %edi, 40(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 64(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 120(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 124(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 128(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 164(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp
+ movl 144(%esp), %edi
+ movl 140(%esp), %esi
+ movl 136(%esp), %edx
+ movl 132(%esp), %ecx
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebx # 4-byte Reload
+ movl %ebx, 56(%eax)
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl %ebx, 60(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %ebx # 4-byte Reload
+ movl %ebx, 64(%eax)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 76(%eax)
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 80(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, 84(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 92(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 96(%eax)
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 100(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 104(%eax)
+ movl %ecx, 108(%eax)
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 112(%eax)
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 116(%eax)
+ addl $1084, %esp # imm = 0x43C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end224:
+ .size mcl_fpDbl_mulPre15L, .Lfunc_end224-mcl_fpDbl_mulPre15L
+
+ .globl mcl_fpDbl_sqrPre15L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre15L,@function
+mcl_fpDbl_sqrPre15L: # @mcl_fpDbl_sqrPre15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1084, %esp # imm = 0x43C
+ calll .L225$pb
+.L225$pb:
+ popl %ebx
+.Ltmp46:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp46-.L225$pb), %ebx
+ movl %ebx, 116(%esp) # 4-byte Spill
+ movl 1108(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl %edx, %edi
+ movl %ebx, %esi
+ calll .LmulPv480x32
+ movl 1076(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1072(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1068(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1060(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1056(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1052(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1044(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1040(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1036(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1032(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1028(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 1024(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1016(%esp), %eax
+ movl 1020(%esp), %ebp
+ movl 1104(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl %esi, %ebx
+ calll .LmulPv480x32
+ addl 952(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 1012(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1008(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1004(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1000(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 996(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 992(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 988(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 984(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 980(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 972(%esp), %edi
+ movl 968(%esp), %esi
+ movl 964(%esp), %edx
+ movl 956(%esp), %eax
+ movl 960(%esp), %ecx
+ movl 1104(%esp), %ebp
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 72(%esp), %eax # 4-byte Reload
+ addl 888(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 948(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 944(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 940(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 936(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 932(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 928(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 924(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 916(%esp), %ebx
+ movl 912(%esp), %edi
+ movl 908(%esp), %esi
+ movl 904(%esp), %edx
+ movl 900(%esp), %ecx
+ movl 892(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 896(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 72(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 112(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 824(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 884(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 880(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 876(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 872(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 868(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 864(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 856(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 852(%esp), %ebx
+ movl 848(%esp), %edi
+ movl 844(%esp), %esi
+ movl 840(%esp), %edx
+ movl 836(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 760(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 788(%esp), %ebx
+ movl 784(%esp), %edi
+ movl 780(%esp), %esi
+ movl 776(%esp), %edx
+ movl 772(%esp), %ecx
+ movl 764(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 748(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 744(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 740(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 732(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 724(%esp), %ebx
+ movl 720(%esp), %edi
+ movl 716(%esp), %esi
+ movl 712(%esp), %edx
+ movl 708(%esp), %ecx
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 632(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 688(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 680(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 676(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 672(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 660(%esp), %ebx
+ movl 656(%esp), %edi
+ movl 652(%esp), %esi
+ movl 648(%esp), %edx
+ movl 644(%esp), %ecx
+ movl 636(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 568(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 628(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 624(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 620(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 616(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 596(%esp), %ebx
+ movl 592(%esp), %edi
+ movl 588(%esp), %esi
+ movl 584(%esp), %edx
+ movl 580(%esp), %ecx
+ movl 572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 504(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 532(%esp), %ebx
+ movl 528(%esp), %edi
+ movl 524(%esp), %esi
+ movl 520(%esp), %edx
+ movl 516(%esp), %ecx
+ movl 508(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 440(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 468(%esp), %ebx
+ movl 464(%esp), %edi
+ movl 460(%esp), %esi
+ movl 456(%esp), %edx
+ movl 452(%esp), %ecx
+ movl 444(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 376(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 404(%esp), %ebx
+ movl 400(%esp), %edi
+ movl 396(%esp), %esi
+ movl 392(%esp), %edx
+ movl 388(%esp), %ecx
+ movl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 44(%edx), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 312(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 348(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 340(%esp), %ebx
+ movl 336(%esp), %edi
+ movl 332(%esp), %esi
+ movl 328(%esp), %edx
+ movl 324(%esp), %ecx
+ movl 316(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%eax)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl %eax, 108(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 48(%edx), %eax
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 108(%esp), %eax # 4-byte Reload
+ addl 248(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 288(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 284(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 276(%esp), %ebx
+ movl 272(%esp), %edi
+ movl 268(%esp), %edx
+ movl 264(%esp), %ecx
+ movl 260(%esp), %eax
+ movl 252(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 256(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ movl 1104(%esp), %ebp
+ movl %esi, 48(%ebp)
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 20(%esp), %esi # 4-byte Reload
+ adcl %esi, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 52(%edx), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 244(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 240(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 236(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 232(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 220(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 212(%esp), %ebx
+ movl 208(%esp), %edx
+ movl 204(%esp), %ecx
+ movl 200(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 188(%esp), %eax
+ movl 192(%esp), %esi
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl 1104(%esp), %edi
+ movl %ebp, 52(%edi)
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl %edi, 40(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 64(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 56(%edx), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
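+# NOTE: final limb (offset 56): after this last partial product the low word goes to
+# out[14] (offset 56) and the remaining carries flow straight into the high half of
+# the 30-word result (offsets 60..116 below).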
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 120(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 124(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 128(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 164(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp
+ movl 144(%esp), %edi
+ movl 140(%esp), %esi
+ movl 136(%esp), %edx
+ movl 132(%esp), %ecx
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebx # 4-byte Reload
+ movl %ebx, 56(%eax)
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl %ebx, 60(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %ebx # 4-byte Reload
+ movl %ebx, 64(%eax)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 76(%eax)
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 80(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, 84(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 92(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 96(%eax)
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 100(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 104(%eax)
+ movl %ecx, 108(%eax)
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 112(%eax)
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 116(%eax)
+ addl $1084, %esp # imm = 0x43C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end225:
+ .size mcl_fpDbl_sqrPre15L, .Lfunc_end225-mcl_fpDbl_sqrPre15L
+
+ .globl mcl_fp_mont15L
+ .align 16, 0x90
+ .type mcl_fp_mont15L,@function
+mcl_fp_mont15L: # @mcl_fp_mont15L
+# BB#0:
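+# NOTE: Montgomery multiplication for 15 x 32-bit (480-bit) operands. The prologue
+# saves the callee-saved registers, reserves a 2044-byte scratch frame, and the
+# call/pop/add sequence below sets %ebx to the GOT so the PIC calls to
+# .LmulPv480x32 resolve. After the prologue the pointer arguments are read from
+# 2068(%esp), 2072(%esp) and 2076(%esp) (the last one being the modulus).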
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2044, %esp # imm = 0x7FC
+ calll .L226$pb
+.L226$pb:
+ popl %ebx
+.Ltmp47:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp47-.L226$pb), %ebx
+ movl 2076(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
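+# NOTE: -4(modulus) presumably holds the Montgomery constant -p^{-1} mod 2^32 in
+# mcl's layout; it is kept at 52(%esp) and multiplied against the running low word
+# (the imull below and in every later round) to derive each reduction multiplier.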
+ movl 2072(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1976(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 1976(%esp), %ebp
+ movl 1980(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
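+# NOTE: first reduction step: m = t0 * inv (mod 2^32). m is handed to
+# .LmulPv480x32 with the modulus pointer 2076(%esp) further down; adding m*p to the
+# accumulator clears its low word, as Montgomery reduction requires.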
+ movl 2036(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 2032(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2028(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 2024(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2020(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2016(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2012(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2008(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 2004(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 2000(%esp), %edi
+ movl 1996(%esp), %esi
+ movl 1992(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 1988(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1984(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1912(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ addl 1912(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1916(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1920(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1924(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1928(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1932(%esp), %esi
+ adcl 1936(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1944(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1948(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1956(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1960(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1964(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1968(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1972(%esp), %ebp
+ sbbl %eax, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
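+# NOTE: sbbl %eax, %eax turns the carry out of the preceding adcl chain into 0 or
+# -1; it is masked with andl $1 when reloaded, so the accumulator's extra word
+# carries a clean 0/1 across the next call.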
+ movl 2072(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1848(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 116(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ movl 88(%esp), %edx # 4-byte Reload
+ addl 1848(%esp), %edx
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1852(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1856(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1860(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1864(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1868(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1872(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1876(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1880(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1884(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1888(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1892(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1896(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1900(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1904(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ adcl 1908(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1784(%esp), %ecx
+ movl 2076(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1784(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1804(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1812(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1820(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1836(%esp), %esi
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1840(%esp), %ebp
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1844(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
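+# NOTE: the remaining limbs repeat the same two-call round: one .LmulPv480x32 with
+# the next word of the operand at 2072(%esp) against the pointer in 2068(%esp), then
+# one with the freshly derived reduction word against the modulus at 2076(%esp),
+# each followed by a 16-word adcl accumulation.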
+ movl 2072(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1720(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 1720(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1724(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1728(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1732(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1736(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1740(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1744(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1756(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 1768(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl 1772(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1776(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1780(%esp), %esi
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1656(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ addl 1656(%esp), %eax
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1664(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1680(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 1688(%esp), %ebp
+ adcl 1692(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1696(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1700(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1704(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1708(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 1712(%esp), %edi
+ adcl 1716(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1592(%esp), %ecx
+ movl 2068(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1592(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1596(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1600(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1604(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1608(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1616(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1620(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1624(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1628(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1632(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1636(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1640(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1644(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1528(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 1528(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1544(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1548(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1564(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl 1568(%esp), %edi
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1572(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1576(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1580(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1584(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1588(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1464(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 92(%esp), %ecx # 4-byte Reload
+ addl 1464(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1476(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1484(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1488(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1496(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1500(%esp), %edi
+ movl %edi, 112(%esp) # 4-byte Spill
+ adcl 1504(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 1512(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1516(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1400(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 92(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1400(%esp), %edi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1404(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1408(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1412(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1416(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 1420(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1424(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1428(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 1432(%esp), %edi
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1436(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1440(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 1448(%esp), %esi
+ movl %esi, %ebp
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1452(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1460(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1336(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1336(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1364(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1380(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ adcl 1384(%esp), %esi
+ movl %esi, %ebp
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1392(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1272(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 80(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1272(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1276(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1280(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1284(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1288(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1296(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1300(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1304(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1308(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1312(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1316(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 1320(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1328(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 2072(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 1208(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1232(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1244(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1264(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1268(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ addl 1144(%esp), %eax
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1156(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1168(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1180(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1196(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1080(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 1080(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1088(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1092(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1128(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 1016(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1028(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1032(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1044(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 1060(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 952(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 964(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 976(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 992(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl %ebp, %eax
+ andl $1, %eax
+ addl 888(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 892(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 896(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 900(%esp), %edi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 904(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 908(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 912(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 916(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 920(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 924(%esp), %ebp
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 928(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 932(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 936(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 940(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 944(%esp), %esi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 824(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 832(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 856(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 864(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 872(%esp), %edi
+ adcl 876(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 760(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 776(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 800(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 804(%esp), %ebp
+ adcl 808(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 816(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 696(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 708(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 736(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 748(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 752(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 632(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 656(%esp), %edi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 672(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 688(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 568(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 588(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 596(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 604(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 504(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 516(%esp), %edi
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 520(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 532(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 560(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 440(%esp), %ecx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 448(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ adcl 452(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 460(%esp), %edi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 492(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 376(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 388(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 396(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 404(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 416(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 312(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 320(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 336(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 348(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 352(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 248(%esp), %edi
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 252(%esp), %esi
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 256(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 288(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl %esi, %ecx
+ movl 96(%esp), %esi # 4-byte Reload
+ addl 184(%esp), %ecx
+ adcl 188(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 196(%esp), %ebp
+ adcl 200(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 104(%esp), %ebx # 4-byte Reload
+ andl $1, %ebx
+ addl 120(%esp), %edi
+ movl %ebp, %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 128(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 132(%esp), %edi
+ adcl 136(%esp), %esi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 148(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 152(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 156(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 160(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 164(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 168(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 172(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 176(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %edx # 4-byte Reload
+ adcl 180(%esp), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 104(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 2076(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %ecx
+ movl %edi, %eax
+ sbbl 8(%ebp), %eax
+ movl %esi, %ebx
+ sbbl 12(%ebp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx # 4-byte Reload
+ sbbl 16(%ebp), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 92(%esp), %ebx # 4-byte Reload
+ sbbl 20(%ebp), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ sbbl 24(%ebp), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ebp), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ sbbl 32(%ebp), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %ebx # 4-byte Reload
+ sbbl 36(%ebp), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ sbbl 40(%ebp), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ebx # 4-byte Reload
+ sbbl 44(%ebp), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 76(%esp), %ebx # 4-byte Reload
+ sbbl 48(%ebp), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx # 4-byte Reload
+ sbbl 52(%ebp), %ebx
+ movl %ebx, 88(%esp) # 4-byte Spill
+ movl 112(%esp), %ebx # 4-byte Reload
+ sbbl 56(%ebp), %ebx
+ movl %ebx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ebx # 4-byte Reload
+ movl 108(%esp), %ebp # 4-byte Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB226_2
+# BB#1:
+ movl %edx, %ebp
+.LBB226_2:
+ movl 2064(%esp), %edx
+ movl %ebp, (%edx)
+ testb %bl, %bl
+ movl 116(%esp), %ebp # 4-byte Reload
+ jne .LBB226_4
+# BB#3:
+ movl %ecx, %ebp
+.LBB226_4:
+ movl %ebp, 4(%edx)
+ jne .LBB226_6
+# BB#5:
+ movl %eax, %edi
+.LBB226_6:
+ movl %edi, 8(%edx)
+ jne .LBB226_8
+# BB#7:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB226_8:
+ movl %esi, 12(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ jne .LBB226_10
+# BB#9:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB226_10:
+ movl %eax, 16(%edx)
+ movl 92(%esp), %eax # 4-byte Reload
+ jne .LBB226_12
+# BB#11:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB226_12:
+ movl %eax, 20(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ jne .LBB226_14
+# BB#13:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB226_14:
+ movl %eax, 24(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ jne .LBB226_16
+# BB#15:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB226_16:
+ movl %eax, 28(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ jne .LBB226_18
+# BB#17:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB226_18:
+ movl %eax, 32(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB226_20
+# BB#19:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB226_20:
+ movl %eax, 36(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ jne .LBB226_22
+# BB#21:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB226_22:
+ movl %eax, 40(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ jne .LBB226_24
+# BB#23:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB226_24:
+ movl %eax, 44(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ jne .LBB226_26
+# BB#25:
+ movl 52(%esp), %eax # 4-byte Reload
+.LBB226_26:
+ movl %eax, 48(%edx)
+ movl 100(%esp), %eax # 4-byte Reload
+ jne .LBB226_28
+# BB#27:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB226_28:
+ movl %eax, 52(%edx)
+ movl 112(%esp), %eax # 4-byte Reload
+ jne .LBB226_30
+# BB#29:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB226_30:
+ movl %eax, 56(%edx)
+ addl $2044, %esp # imm = 0x7FC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end226:
+ .size mcl_fp_mont15L, .Lfunc_end226-mcl_fp_mont15L
+
+ .globl mcl_fp_montNF15L
+ .align 16, 0x90
+ .type mcl_fp_montNF15L,@function
+mcl_fp_montNF15L: # @mcl_fp_montNF15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2028, %esp # imm = 0x7EC
+ calll .L227$pb
+.L227$pb:
+ popl %ebx
+.Ltmp48:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp48-.L227$pb), %ebx
+ movl 2060(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1960(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1960(%esp), %ebp
+ movl 1964(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 2020(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2016(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2012(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2008(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 2004(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2000(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1996(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1992(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1988(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1984(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1980(%esp), %esi
+ movl 1976(%esp), %edi
+ movl 1972(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1968(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1896(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1896(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1900(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1904(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1908(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1912(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 1916(%esp), %esi
+ movl %esi, %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1920(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1924(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1928(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1932(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1936(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1944(%esp), %ebp
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1948(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1956(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1832(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1892(%esp), %eax
+ movl 92(%esp), %edx # 4-byte Reload
+ addl 1832(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1836(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1840(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1844(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1848(%esp), %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1852(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1856(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1860(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1864(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1868(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1872(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1876(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 1880(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 1884(%esp), %ebp
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1888(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1768(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1768(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1772(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1776(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1784(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1804(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, %esi
+ adcl 1820(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1824(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1704(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1764(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1704(%esp), %edx
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1708(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1712(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1716(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1720(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1724(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1728(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1732(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 1736(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1740(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ adcl 1748(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1752(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1756(%esp), %ebp
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1760(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1640(%esp), %ecx
+ movl 2060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ addl 1640(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1644(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1656(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1664(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1680(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, %esi
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1688(%esp), %edi
+ adcl 1692(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1696(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1700(%esp), %ebp
+ movl 2056(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1576(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1636(%esp), %eax
+ movl 88(%esp), %edx # 4-byte Reload
+ addl 1576(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1580(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1584(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1588(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1592(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1596(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1600(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1604(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1608(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1612(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 1616(%esp), %esi
+ adcl 1620(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1624(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1628(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1632(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1512(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1512(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1516(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 1532(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1544(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1548(%esp), %ebp
+ adcl 1552(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1564(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1568(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1572(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1448(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1508(%esp), %eax
+ movl 72(%esp), %edx # 4-byte Reload
+ addl 1448(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1452(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1460(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 1464(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1468(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1472(%esp), %edi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1476(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1480(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 1484(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1488(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1492(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1496(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1500(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 1504(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1384(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1384(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1408(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1432(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1440(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1444(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1320(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1380(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 1320(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 1324(%esp), %ebp
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1328(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1352(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1368(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1256(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 1256(%esp), %eax
+ adcl 1260(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ adcl 1264(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1272(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 1296(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1304(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1312(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1192(%esp), %ecx
+ movl 2052(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ movl 1252(%esp), %eax
+ movl 48(%esp), %edx # 4-byte Reload
+ addl 1192(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1196(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1200(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 1204(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1208(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1212(%esp), %ebp
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1216(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1224(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1228(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 1244(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1248(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1128(%esp), %ecx
+ movl 2060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ addl 1128(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1140(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1148(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1168(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1184(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 1188(%esp), %esi
+ movl 2056(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1064(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1124(%esp), %eax
+ movl 44(%esp), %edx # 4-byte Reload
+ addl 1064(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 1072(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1076(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1084(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1088(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1092(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1096(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 1100(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1104(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1108(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1112(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1116(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1120(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %esi
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1000(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1000(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1012(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1020(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1028(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1060(%esp), %esi
+ movl 2056(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 996(%esp), %eax
+ movl 52(%esp), %edx # 4-byte Reload
+ addl 936(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 940(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 944(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 952(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 956(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 960(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 964(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 968(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 972(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 976(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 980(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 984(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 988(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 992(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %esi
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 872(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 872(%esp), %edi
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 876(%esp), %ebp
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 880(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 932(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 808(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 868(%esp), %eax
+ movl %ebp, %ecx
+ addl 808(%esp), %ecx
+ adcl 812(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 816(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 820(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 824(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 828(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 832(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 836(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 840(%esp), %edi
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 844(%esp), %esi
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 848(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 852(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 856(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 860(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 864(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 744(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 744(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 776(%esp), %edi
+ adcl 780(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 792(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 680(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 740(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 680(%esp), %ecx
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 684(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 688(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 692(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 696(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ adcl 700(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 704(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 708(%esp), %edi
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 712(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 716(%esp), %ebp
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 720(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 724(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 728(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 732(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 736(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 616(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 616(%esp), %esi
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 620(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 644(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 648(%esp), %edi
+ adcl 652(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 612(%esp), %edx
+ movl %esi, %ecx
+ addl 552(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 572(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 580(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 588(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 488(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 508(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 512(%esp), %edi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 528(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 484(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 424(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 440(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ adcl 444(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %esi, %edi
+ adcl 460(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 360(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 368(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 376(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 396(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 400(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 296(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 356(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 296(%esp), %ecx
+ adcl 300(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 308(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 332(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 336(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 232(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 240(%esp), %ebp
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 244(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 272(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 276(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 228(%esp), %edx
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 168(%esp), %ecx
+ adcl 172(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ adcl 176(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 188(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 208(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 2060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ addl 104(%esp), %edi
+ movl 68(%esp), %edi # 4-byte Reload
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 112(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ adcl 116(%esp), %edi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 120(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 124(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 128(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 132(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 140(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 144(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 148(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 152(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 156(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 160(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 164(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 2060(%esp), %ecx
+ subl (%ecx), %edx
+ movl %ebx, %ebp
+ sbbl 4(%ecx), %ebp
+ movl %edi, %ebx
+ sbbl 8(%ecx), %ebx
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 12(%ecx), %eax
+ sbbl 16(%ecx), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ sbbl 20(%ecx), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 24(%ecx), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 28(%ecx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 32(%ecx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 36(%ecx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ sbbl 40(%ecx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ sbbl 44(%ecx), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ sbbl 48(%ecx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ sbbl 52(%ecx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ sbbl 56(%ecx), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ movl 100(%esp), %ecx # 4-byte Reload
+ js .LBB227_2
+# BB#1:
+ movl %edx, %ecx
+.LBB227_2:
+ movl 2048(%esp), %edx
+ movl %ecx, (%edx)
+ movl 92(%esp), %esi # 4-byte Reload
+ js .LBB227_4
+# BB#3:
+ movl %ebp, %esi
+.LBB227_4:
+ movl %esi, 4(%edx)
+ movl 88(%esp), %ecx # 4-byte Reload
+ js .LBB227_6
+# BB#5:
+ movl %ebx, %edi
+.LBB227_6:
+ movl %edi, 8(%edx)
+ js .LBB227_8
+# BB#7:
+ movl %eax, %ecx
+.LBB227_8:
+ movl %ecx, 12(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB227_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB227_10:
+ movl %eax, 16(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB227_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB227_12:
+ movl %eax, 20(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB227_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB227_14:
+ movl %eax, 24(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB227_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB227_16:
+ movl %eax, 28(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB227_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB227_18:
+ movl %eax, 32(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB227_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB227_20:
+ movl %eax, 36(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB227_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB227_22:
+ movl %eax, 40(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB227_24
+# BB#23:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB227_24:
+ movl %eax, 44(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB227_26
+# BB#25:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB227_26:
+ movl %eax, 48(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB227_28
+# BB#27:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB227_28:
+ movl %eax, 52(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB227_30
+# BB#29:
+ movl 68(%esp), %eax # 4-byte Reload
+.LBB227_30:
+ movl %eax, 56(%edx)
+ addl $2028, %esp # imm = 0x7EC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end227:
+ .size mcl_fp_montNF15L, .Lfunc_end227-mcl_fp_montNF15L
+
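+# mcl_fp_montRed15L(z, xy, p): Montgomery reduction for a 15-limb (480-bit)
+# modulus. The double-width input xy (30 limbs) is reduced modulo p by 15
+# rounds of "multiply the low limb by the word stored at p[-1] (presumably
+# -p^{-1} mod 2^32), add that multiple of p via mulPv480x32, drop the cleared
+# low limb"; a final conditional subtraction of p brings the result into
+# [0, p) before it is written to z.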
+ .globl mcl_fp_montRed15L
+ .align 16, 0x90
+ .type mcl_fp_montRed15L,@function
+mcl_fp_montRed15L: # @mcl_fp_montRed15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1148, %esp # imm = 0x47C
+ calll .L228$pb
+.L228$pb:
+ popl %eax
+.Ltmp49:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp49-.L228$pb), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1176(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 1172(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 80(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 116(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%ecx), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 100(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 96(%ecx), %esi
+ movl %esi, 152(%esp) # 4-byte Spill
+ movl 92(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 84(%ecx), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 80(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl %esi, 168(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 164(%esp) # 4-byte Spill
+ movl 68(%ecx), %esi
+ movl %esi, 176(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 60(%ecx), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 56(%ecx), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 140(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 44(%ecx), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 12(%ecx), %edi
+ movl 8(%ecx), %esi
+ movl (%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 56(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 1080(%esp), %ecx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 80(%esp), %eax # 4-byte Reload
+ addl 1080(%esp), %eax
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1084(%esp), %ecx
+ adcl 1088(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl 1092(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1108(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ movl 148(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1016(%esp), %esi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1020(%esp), %edx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 1060(%esp), %ebp
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, %esi
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 148(%esp) # 4-byte Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 952(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 956(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 992(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %ebp # 4-byte Reload
+ adcl 1004(%esp), %ebp
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 888(%esp), %esi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 892(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl 936(%esp), %ebp
+ movl %ebp, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 160(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 824(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 828(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 760(%esp), %esi
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 764(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ movl 152(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 696(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 152(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 632(%esp), %edi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 636(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %ebp # 4-byte Reload
+ adcl 672(%esp), %ebp
+ movl 164(%esp), %edi # 4-byte Reload
+ adcl 676(%esp), %edi
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 568(%esp), %esi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 572(%esp), %ecx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ adcl 604(%esp), %ebp
+ movl %ebp, 176(%esp) # 4-byte Spill
+ adcl 608(%esp), %edi
+ movl %edi, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %edi # 4-byte Reload
+ adcl 616(%esp), %edi
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1176(%esp), %eax
+ movl %eax, %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 504(%esp), %esi
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 508(%esp), %ecx
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %esi # 4-byte Reload
+ adcl 524(%esp), %esi
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl 548(%esp), %edi
+ movl %edi, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 440(%esp), %edi
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %edi # 4-byte Reload
+ adcl 452(%esp), %edi
+ adcl 456(%esp), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %esi # 4-byte Reload
+ adcl 464(%esp), %esi
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 376(%esp), %ebp
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 380(%esp), %ebp
+ adcl 384(%esp), %edi
+ movl %edi, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %edi # 4-byte Reload
+ adcl 392(%esp), %edi
+ adcl 396(%esp), %esi
+ movl %esi, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %esi # 4-byte Reload
+ adcl 412(%esp), %esi
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 312(%esp), %ebp
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ adcl 324(%esp), %edi
+ movl %edi, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %ecx # 4-byte Reload
+ adcl 328(%esp), %ecx
+ movl %ecx, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl %ecx, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %ecx # 4-byte Reload
+ adcl 336(%esp), %ecx
+ movl %ecx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %ecx # 4-byte Reload
+ adcl 340(%esp), %ecx
+ movl %ecx, 168(%esp) # 4-byte Spill
+ adcl 344(%esp), %esi
+ movl %esi, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %ecx # 4-byte Reload
+ adcl 348(%esp), %ecx
+ movl %ecx, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp # 4-byte Reload
+ adcl 352(%esp), %ebp
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 356(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 360(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 364(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 368(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %eax, %edi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 248(%esp), %edi
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ adcl 284(%esp), %ebp
+ movl %ebp, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 288(%esp), %edi
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 308(%esp), %ebp
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 184(%esp), %esi
+ movl 172(%esp), %edx # 4-byte Reload
+ adcl 188(%esp), %edx
+ movl %edx, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %ecx # 4-byte Reload
+ adcl 192(%esp), %ecx
+ movl %ecx, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %esi # 4-byte Reload
+ adcl 204(%esp), %esi
+ movl %esi, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl 220(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 240(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ movl %edx, %eax
+ subl 16(%esp), %edx # 4-byte Folded Reload
+ sbbl 4(%esp), %ecx # 4-byte Folded Reload
+ movl 176(%esp), %eax # 4-byte Reload
+ sbbl 8(%esp), %eax # 4-byte Folded Reload
+ movl 164(%esp), %ebp # 4-byte Reload
+ sbbl 12(%esp), %ebp # 4-byte Folded Reload
+ sbbl 20(%esp), %esi # 4-byte Folded Reload
+ movl 144(%esp), %edi # 4-byte Reload
+ sbbl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 160(%esp), %edi # 4-byte Reload
+ sbbl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 148(%esp), %edi # 4-byte Reload
+ sbbl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ sbbl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 152(%esp), %edi # 4-byte Reload
+ sbbl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ sbbl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 112(%esp) # 4-byte Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ sbbl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ sbbl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ sbbl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 140(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ sbbl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 156(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ movl %ebx, %edi
+ jne .LBB228_2
+# BB#1:
+ movl %edx, 172(%esp) # 4-byte Spill
+.LBB228_2:
+ movl 1168(%esp), %edx
+ movl 172(%esp), %ebx # 4-byte Reload
+ movl %ebx, (%edx)
+ movl %edi, %ebx
+ testb %bl, %bl
+ jne .LBB228_4
+# BB#3:
+ movl %ecx, 180(%esp) # 4-byte Spill
+.LBB228_4:
+ movl 180(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%edx)
+ movl 176(%esp), %ecx # 4-byte Reload
+ jne .LBB228_6
+# BB#5:
+ movl %eax, %ecx
+.LBB228_6:
+ movl %ecx, 8(%edx)
+ movl 164(%esp), %eax # 4-byte Reload
+ jne .LBB228_8
+# BB#7:
+ movl %ebp, %eax
+.LBB228_8:
+ movl %eax, 12(%edx)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl 148(%esp), %eax # 4-byte Reload
+ movl 168(%esp), %ebp # 4-byte Reload
+ jne .LBB228_10
+# BB#9:
+ movl %esi, %ebp
+.LBB228_10:
+ movl %ebp, 16(%edx)
+ movl 152(%esp), %ebp # 4-byte Reload
+ movl 144(%esp), %ebx # 4-byte Reload
+ jne .LBB228_12
+# BB#11:
+ movl 84(%esp), %ebx # 4-byte Reload
+.LBB228_12:
+ movl %ebx, 20(%edx)
+ movl 132(%esp), %ebx # 4-byte Reload
+ movl 160(%esp), %edi # 4-byte Reload
+ jne .LBB228_14
+# BB#13:
+ movl 88(%esp), %edi # 4-byte Reload
+.LBB228_14:
+ movl %edi, 24(%edx)
+ movl 128(%esp), %edi # 4-byte Reload
+ jne .LBB228_16
+# BB#15:
+ movl 92(%esp), %eax # 4-byte Reload
+.LBB228_16:
+ movl %eax, 28(%edx)
+ movl 116(%esp), %esi # 4-byte Reload
+ jne .LBB228_18
+# BB#17:
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, 136(%esp) # 4-byte Spill
+.LBB228_18:
+ movl 136(%esp), %eax # 4-byte Reload
+ movl %eax, 32(%edx)
+ jne .LBB228_20
+# BB#19:
+ movl 100(%esp), %ebp # 4-byte Reload
+.LBB228_20:
+ movl %ebp, 36(%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB228_22
+# BB#21:
+ movl 112(%esp), %ebx # 4-byte Reload
+.LBB228_22:
+ movl %ebx, 40(%edx)
+ jne .LBB228_24
+# BB#23:
+ movl 120(%esp), %edi # 4-byte Reload
+.LBB228_24:
+ movl %edi, 44(%edx)
+ jne .LBB228_26
+# BB#25:
+ movl 124(%esp), %esi # 4-byte Reload
+.LBB228_26:
+ movl %esi, 48(%edx)
+ jne .LBB228_28
+# BB#27:
+ movl 140(%esp), %eax # 4-byte Reload
+.LBB228_28:
+ movl %eax, 52(%edx)
+ jne .LBB228_30
+# BB#29:
+ movl 156(%esp), %ecx # 4-byte Reload
+.LBB228_30:
+ movl %ecx, 56(%edx)
+ addl $1148, %esp # imm = 0x47C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end228:
+ .size mcl_fp_montRed15L, .Lfunc_end228-mcl_fp_montRed15L
+
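+# mcl_fp_addPre15L(z, x, y): plain 15-limb (480-bit) addition with no modular
+# reduction; z = x + y, and the final carry is returned in %eax.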
+ .globl mcl_fp_addPre15L
+ .align 16, 0x90
+ .type mcl_fp_addPre15L,@function
+mcl_fp_addPre15L: # @mcl_fp_addPre15L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %ebx
+ adcl 8(%ecx), %ebx
+ movl 16(%esp), %edi
+ movl %edx, (%edi)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%edi)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %ebx, 8(%edi)
+ movl 20(%eax), %ebx
+ movl %edx, 12(%edi)
+ movl 20(%ecx), %edx
+ adcl %ebx, %edx
+ movl 24(%eax), %ebx
+ movl %esi, 16(%edi)
+ movl 24(%ecx), %esi
+ adcl %ebx, %esi
+ movl 28(%eax), %ebx
+ movl %edx, 20(%edi)
+ movl 28(%ecx), %edx
+ adcl %ebx, %edx
+ movl 32(%eax), %ebx
+ movl %esi, 24(%edi)
+ movl 32(%ecx), %esi
+ adcl %ebx, %esi
+ movl 36(%eax), %ebx
+ movl %edx, 28(%edi)
+ movl 36(%ecx), %edx
+ adcl %ebx, %edx
+ movl 40(%eax), %ebx
+ movl %esi, 32(%edi)
+ movl 40(%ecx), %esi
+ adcl %ebx, %esi
+ movl 44(%eax), %ebx
+ movl %edx, 36(%edi)
+ movl 44(%ecx), %edx
+ adcl %ebx, %edx
+ movl 48(%eax), %ebx
+ movl %esi, 40(%edi)
+ movl 48(%ecx), %esi
+ adcl %ebx, %esi
+ movl 52(%eax), %ebx
+ movl %edx, 44(%edi)
+ movl 52(%ecx), %edx
+ adcl %ebx, %edx
+ movl %esi, 48(%edi)
+ movl %edx, 52(%edi)
+ movl 56(%eax), %eax
+ movl 56(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 56(%edi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end229:
+ .size mcl_fp_addPre15L, .Lfunc_end229-mcl_fp_addPre15L
+
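+# mcl_fp_subPre15L(z, x, y): plain 15-limb subtraction with no reduction;
+# z = x - y, and the final borrow is returned in %eax.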
+ .globl mcl_fp_subPre15L
+ .align 16, 0x90
+ .type mcl_fp_subPre15L,@function
+mcl_fp_subPre15L: # @mcl_fp_subPre15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebp
+ sbbl 8(%edx), %ebp
+ movl 20(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebx)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebp, 8(%ebx)
+ movl 20(%edx), %ebp
+ movl %esi, 12(%ebx)
+ movl 20(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 24(%edx), %ebp
+ movl %edi, 16(%ebx)
+ movl 24(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 28(%edx), %ebp
+ movl %esi, 20(%ebx)
+ movl 28(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 32(%edx), %ebp
+ movl %edi, 24(%ebx)
+ movl 32(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 36(%edx), %ebp
+ movl %esi, 28(%ebx)
+ movl 36(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 40(%edx), %ebp
+ movl %edi, 32(%ebx)
+ movl 40(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 44(%edx), %ebp
+ movl %esi, 36(%ebx)
+ movl 44(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 48(%edx), %ebp
+ movl %edi, 40(%ebx)
+ movl 48(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 52(%edx), %ebp
+ movl %esi, 44(%ebx)
+ movl 52(%ecx), %esi
+ sbbl %ebp, %esi
+ movl %edi, 48(%ebx)
+ movl %esi, 52(%ebx)
+ movl 56(%edx), %edx
+ movl 56(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 56(%ebx)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end230:
+ .size mcl_fp_subPre15L, .Lfunc_end230-mcl_fp_subPre15L
+
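+# mcl_fp_shr1_15L(z, x): logical right shift of a 15-limb value by one bit;
+# each shrdl propagates the low bit of the next-higher limb into the limb
+# below it, and the top limb is shifted with a plain shrl.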
+ .globl mcl_fp_shr1_15L
+ .align 16, 0x90
+ .type mcl_fp_shr1_15L,@function
+mcl_fp_shr1_15L: # @mcl_fp_shr1_15L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 48(%ecx)
+ movl 56(%eax), %eax
+ shrdl $1, %eax, %esi
+ movl %esi, 52(%ecx)
+ shrl %eax
+ movl %eax, 56(%ecx)
+ popl %esi
+ retl
+.Lfunc_end231:
+ .size mcl_fp_shr1_15L, .Lfunc_end231-mcl_fp_shr1_15L
+
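+# mcl_fp_add15L(z, x, y, p): modular addition. The raw 15-limb sum is stored
+# to z first, then p is subtracted from it; if that subtraction does not
+# borrow (the %nocarry path), the reduced value overwrites z.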
+ .globl mcl_fp_add15L
+ .align 16, 0x90
+ .type mcl_fp_add15L,@function
+mcl_fp_add15L: # @mcl_fp_add15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $48, %esp
+ movl 76(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 72(%esp), %eax
+ addl (%eax), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ adcl 4(%eax), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 8(%ecx), %edx
+ adcl 8(%eax), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 12(%eax), %esi
+ movl 16(%eax), %edx
+ adcl 12(%ecx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 16(%ecx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 20(%eax), %edx
+ adcl 20(%ecx), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 24(%eax), %edx
+ adcl 24(%ecx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 28(%eax), %edx
+ adcl 28(%ecx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 32(%eax), %edx
+ adcl 32(%ecx), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 36(%eax), %edx
+ adcl 36(%ecx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 40(%eax), %edx
+ adcl 40(%ecx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 44(%eax), %ebx
+ adcl 44(%ecx), %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl 48(%eax), %ebp
+ adcl 48(%ecx), %ebp
+ movl 52(%eax), %edi
+ adcl 52(%ecx), %edi
+ movl 56(%eax), %edx
+ adcl 56(%ecx), %edx
+ movl 68(%esp), %ecx
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ecx)
+ movl 44(%esp), %esi # 4-byte Reload
+ movl %esi, 4(%ecx)
+ movl 40(%esp), %esi # 4-byte Reload
+ movl %esi, 8(%ecx)
+ movl 36(%esp), %esi # 4-byte Reload
+ movl %esi, 12(%ecx)
+ movl 32(%esp), %esi # 4-byte Reload
+ movl %esi, 16(%ecx)
+ movl 28(%esp), %esi # 4-byte Reload
+ movl %esi, 20(%ecx)
+ movl 24(%esp), %esi # 4-byte Reload
+ movl %esi, 24(%ecx)
+ movl 20(%esp), %esi # 4-byte Reload
+ movl %esi, 28(%ecx)
+ movl 16(%esp), %esi # 4-byte Reload
+ movl %esi, 32(%ecx)
+ movl 12(%esp), %esi # 4-byte Reload
+ movl %esi, 36(%ecx)
+ movl 8(%esp), %esi # 4-byte Reload
+ movl %esi, 40(%ecx)
+ movl %ebx, 44(%ecx)
+ movl %ebp, 48(%ecx)
+ movl %edi, 52(%ecx)
+ movl %edx, 56(%ecx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 80(%esp), %esi
+ subl (%esi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 4(%esi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ sbbl 8(%esi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ sbbl 12(%esi), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ sbbl 16(%esi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ sbbl 20(%esi), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ sbbl 24(%esi), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %edx # 4-byte Reload
+ sbbl 28(%esi), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %edx # 4-byte Reload
+ sbbl 32(%esi), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %edx # 4-byte Reload
+ sbbl 36(%esi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %edx # 4-byte Reload
+ sbbl 40(%esi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl (%esp), %edx # 4-byte Reload
+ sbbl 44(%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ sbbl 48(%esi), %ebp
+ sbbl 52(%esi), %edi
+ sbbl 56(%esi), %eax
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB232_2
+# BB#1: # %nocarry
+ movl 4(%esp), %edx # 4-byte Reload
+ movl %edx, (%ecx)
+ movl 44(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%ecx)
+ movl 40(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%ecx)
+ movl 36(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%ecx)
+ movl 32(%esp), %edx # 4-byte Reload
+ movl %edx, 16(%ecx)
+ movl 28(%esp), %edx # 4-byte Reload
+ movl %edx, 20(%ecx)
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 24(%ecx)
+ movl 20(%esp), %edx # 4-byte Reload
+ movl %edx, 28(%ecx)
+ movl 16(%esp), %edx # 4-byte Reload
+ movl %edx, 32(%ecx)
+ movl 12(%esp), %edx # 4-byte Reload
+ movl %edx, 36(%ecx)
+ movl 8(%esp), %edx # 4-byte Reload
+ movl %edx, 40(%ecx)
+ movl (%esp), %edx # 4-byte Reload
+ movl %edx, 44(%ecx)
+ movl %ebp, 48(%ecx)
+ movl %edi, 52(%ecx)
+ movl %eax, 56(%ecx)
+.LBB232_2: # %carry
+ addl $48, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end232:
+ .size mcl_fp_add15L, .Lfunc_end232-mcl_fp_add15L
+
+ .globl mcl_fp_addNF15L
+ .align 16, 0x90
+ .type mcl_fp_addNF15L,@function
+mcl_fp_addNF15L: # @mcl_fp_addNF15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $120, %esp
+ movl 148(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %edx
+ movl 144(%esp), %esi
+ addl (%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 4(%esi), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 52(%ecx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 44(%ecx), %ebp
+ movl 40(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ movl 20(%ecx), %ebx
+ movl 16(%ecx), %edi
+ movl 12(%ecx), %edx
+ movl 8(%ecx), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 12(%esi), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebx
+ movl %ebx, 72(%esp) # 4-byte Spill
+ adcl 24(%esi), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 28(%esi), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 32(%esi), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esi), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 40(%esi), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 44(%esi), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 48(%esi), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 52(%esi), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 56(%esi), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 152(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ subl (%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ sbbl 4(%esi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 8(%esi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ sbbl 16(%esi), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 20(%esi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 24(%esi), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ sbbl 28(%esi), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ sbbl 32(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 36(%esi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ sbbl 40(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ movl %edx, %eax
+ sbbl 44(%esi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, %edi
+ sbbl 48(%esi), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edi
+ movl %ecx, %ebx
+ sbbl 52(%esi), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edi
+ sbbl 56(%esi), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl %edi, %esi
+ sarl $31, %esi
+ testl %esi, %esi
+ movl 80(%esp), %esi # 4-byte Reload
+ js .LBB233_2
+# BB#1:
+ movl (%esp), %esi # 4-byte Reload
+.LBB233_2:
+ movl 140(%esp), %edi
+ movl %esi, (%edi)
+ movl 84(%esp), %ecx # 4-byte Reload
+ js .LBB233_4
+# BB#3:
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB233_4:
+ movl %ecx, 4(%edi)
+ movl 104(%esp), %ecx # 4-byte Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ js .LBB233_6
+# BB#5:
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+.LBB233_6:
+ movl 76(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%edi)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB233_8
+# BB#7:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB233_8:
+ movl %eax, 12(%edi)
+ movl %ebx, %ebp
+ movl %edx, %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ js .LBB233_10
+# BB#9:
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB233_10:
+ movl %edx, 16(%edi)
+ movl 112(%esp), %edx # 4-byte Reload
+ movl 108(%esp), %ebx # 4-byte Reload
+ js .LBB233_12
+# BB#11:
+ movl 20(%esp), %esi # 4-byte Reload
+.LBB233_12:
+ movl %esi, 20(%edi)
+ js .LBB233_14
+# BB#13:
+ movl 24(%esp), %esi # 4-byte Reload
+ movl %esi, 88(%esp) # 4-byte Spill
+.LBB233_14:
+ movl 88(%esp), %esi # 4-byte Reload
+ movl %esi, 24(%edi)
+ js .LBB233_16
+# BB#15:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB233_16:
+ movl %ecx, 28(%edi)
+ js .LBB233_18
+# BB#17:
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 116(%esp) # 4-byte Spill
+.LBB233_18:
+ movl 116(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%edi)
+ js .LBB233_20
+# BB#19:
+ movl 36(%esp), %ebx # 4-byte Reload
+.LBB233_20:
+ movl %ebx, 36(%edi)
+ js .LBB233_22
+# BB#21:
+ movl 40(%esp), %edx # 4-byte Reload
+.LBB233_22:
+ movl %edx, 40(%edi)
+ js .LBB233_24
+# BB#23:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB233_24:
+ movl %eax, 44(%edi)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB233_26
+# BB#25:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB233_26:
+ movl %eax, 48(%edi)
+ js .LBB233_28
+# BB#27:
+ movl 52(%esp), %ebp # 4-byte Reload
+.LBB233_28:
+ movl %ebp, 52(%edi)
+ movl 100(%esp), %eax # 4-byte Reload
+ js .LBB233_30
+# BB#29:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB233_30:
+ movl %eax, 56(%edi)
+ addl $120, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end233:
+ .size mcl_fp_addNF15L, .Lfunc_end233-mcl_fp_addNF15L
+
+ .globl mcl_fp_sub15L
+ .align 16, 0x90
+ .type mcl_fp_sub15L,@function
+mcl_fp_sub15L: # @mcl_fp_sub15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 80(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 84(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ sbbl 32(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 40(%esi), %edx
+ sbbl 40(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 44(%esi), %ecx
+ sbbl 44(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 48(%esi), %eax
+ sbbl 48(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 52(%esi), %ebp
+ sbbl 52(%edi), %ebp
+ movl 56(%esi), %esi
+ sbbl 56(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 76(%esp), %ebx
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%ebx)
+ movl %edx, 40(%ebx)
+ movl %ecx, 44(%ebx)
+ movl %eax, 48(%ebx)
+ movl %ebp, 52(%ebx)
+ movl %esi, 56(%ebx)
+ je .LBB234_2
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 88(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 8(%esi), %edi
+ movl 12(%esi), %eax
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl 44(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%ebx)
+ movl 48(%esi), %ecx
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%ebx)
+ movl %ecx, 48(%ebx)
+ movl 52(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 52(%ebx)
+ movl 56(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%ebx)
+.LBB234_2: # %nocarry
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end234:
+ .size mcl_fp_sub15L, .Lfunc_end234-mcl_fp_sub15L
+
+ .globl mcl_fp_subNF15L
+ .align 16, 0x90
+ .type mcl_fp_subNF15L,@function
+mcl_fp_subNF15L: # @mcl_fp_subNF15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $96, %esp
+ movl 120(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 124(%esp), %edi
+ subl (%edi), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ sbbl 4(%edi), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 52(%ecx), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 32(%ecx), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 32(%edi), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 52(%edi), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 56(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ sarl $31, %ebp
+ movl %ebp, %edi
+ shldl $1, %eax, %edi
+ movl 128(%esp), %edx
+ andl (%edx), %edi
+ movl 56(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 48(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 44(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 40(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 36(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 20(%edx), %ebx
+ andl %ebp, %ebx
+ movl 16(%edx), %esi
+ andl %ebp, %esi
+ movl 12(%edx), %ecx
+ andl %ebp, %ecx
+ movl 8(%edx), %eax
+ andl %ebp, %eax
+ andl 4(%edx), %ebp
+ addl 60(%esp), %edi # 4-byte Folded Reload
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl 116(%esp), %edx
+ movl %edi, (%edx)
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 4(%edx)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 8(%edx)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, 12(%edx)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 16(%edx)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 20(%edx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%edx)
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 92(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%edx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%edx)
+ movl 16(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%edx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%edx)
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%edx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 48(%edx)
+ movl %eax, 52(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%edx)
+ addl $96, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end235:
+ .size mcl_fp_subNF15L, .Lfunc_end235-mcl_fp_subNF15L
+
+ .globl mcl_fpDbl_add15L
+ .align 16, 0x90
+ .type mcl_fpDbl_add15L,@function
+mcl_fpDbl_add15L: # @mcl_fpDbl_add15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+ movl 136(%esp), %ecx
+ movl 132(%esp), %edx
+ movl 12(%edx), %edi
+ movl 16(%edx), %esi
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%edx), %ebp
+ movl 128(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%edx), %ebp
+ adcl 8(%edx), %ebx
+ adcl 12(%ecx), %edi
+ adcl 16(%ecx), %esi
+ movl %ebp, 4(%eax)
+ movl 68(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %edi, 12(%eax)
+ movl 20(%edx), %edi
+ adcl %ebx, %edi
+ movl 24(%ecx), %ebx
+ movl %esi, 16(%eax)
+ movl 24(%edx), %esi
+ adcl %ebx, %esi
+ movl 28(%ecx), %ebx
+ movl %edi, 20(%eax)
+ movl 28(%edx), %edi
+ adcl %ebx, %edi
+ movl 32(%ecx), %ebx
+ movl %esi, 24(%eax)
+ movl 32(%edx), %esi
+ adcl %ebx, %esi
+ movl 36(%ecx), %ebx
+ movl %edi, 28(%eax)
+ movl 36(%edx), %edi
+ adcl %ebx, %edi
+ movl 40(%ecx), %ebx
+ movl %esi, 32(%eax)
+ movl 40(%edx), %esi
+ adcl %ebx, %esi
+ movl 44(%ecx), %ebx
+ movl %edi, 36(%eax)
+ movl 44(%edx), %edi
+ adcl %ebx, %edi
+ movl 48(%ecx), %ebx
+ movl %esi, 40(%eax)
+ movl 48(%edx), %esi
+ adcl %ebx, %esi
+ movl 52(%ecx), %ebx
+ movl %edi, 44(%eax)
+ movl 52(%edx), %edi
+ adcl %ebx, %edi
+ movl 56(%ecx), %ebx
+ movl %esi, 48(%eax)
+ movl 56(%edx), %esi
+ adcl %ebx, %esi
+ movl 60(%ecx), %ebx
+ movl %edi, 52(%eax)
+ movl 60(%edx), %edi
+ adcl %ebx, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 64(%ecx), %edi
+ movl %esi, 56(%eax)
+ movl 64(%edx), %eax
+ adcl %edi, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%edx), %eax
+ adcl %ebp, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl 72(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl 76(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%ecx), %esi
+ movl 80(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl 84(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%ecx), %esi
+ movl 88(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%ecx), %esi
+ movl 92(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%ecx), %esi
+ movl 96(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%ecx), %esi
+ movl 100(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%ecx), %eax
+ movl 104(%edx), %esi
+ adcl %eax, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 108(%ecx), %edi
+ movl 108(%edx), %eax
+ adcl %edi, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 112(%ecx), %ebx
+ movl 112(%edx), %edi
+ adcl %ebx, %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 116(%ecx), %ecx
+ movl 116(%edx), %edx
+ adcl %ecx, %edx
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 140(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ subl (%ebp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 40(%ebp), %ecx
+ sbbl 44(%ebp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ sbbl 48(%ebp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl %edx, %edi
+ sbbl 52(%ebp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %esi
+ sbbl 56(%ebp), %esi
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB236_2
+# BB#1:
+ movl %esi, %edi
+.LBB236_2:
+ testb %bl, %bl
+ movl 76(%esp), %eax # 4-byte Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ movl 68(%esp), %ebx # 4-byte Reload
+ movl 64(%esp), %ebp # 4-byte Reload
+ jne .LBB236_4
+# BB#3:
+ movl %ecx, %esi
+ movl (%esp), %ebx # 4-byte Reload
+ movl 4(%esp), %ebp # 4-byte Reload
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB236_4:
+ movl 128(%esp), %edx
+ movl %eax, 60(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, 64(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl %eax, 68(%edx)
+ movl 88(%esp), %eax # 4-byte Reload
+ movl %eax, 72(%edx)
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%edx)
+ movl 100(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ movl %eax, 88(%edx)
+ movl %ebp, 92(%edx)
+ movl %ebx, 96(%edx)
+ movl %esi, 100(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ jne .LBB236_6
+# BB#5:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB236_6:
+ movl %eax, 104(%edx)
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ jne .LBB236_8
+# BB#7:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB236_8:
+ movl %eax, 108(%edx)
+ jne .LBB236_10
+# BB#9:
+ movl 48(%esp), %ecx # 4-byte Reload
+.LBB236_10:
+ movl %ecx, 112(%edx)
+ movl %edi, 116(%edx)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end236:
+ .size mcl_fpDbl_add15L, .Lfunc_end236-mcl_fpDbl_add15L
+
+ .globl mcl_fpDbl_sub15L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub15L,@function
+mcl_fpDbl_sub15L: # @mcl_fpDbl_sub15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 124(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 128(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %esi
+ movl 8(%eax), %edi
+ sbbl 8(%ebp), %edi
+ movl 120(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 12(%eax), %edx
+ sbbl 12(%ebp), %edx
+ movl %esi, 4(%ecx)
+ movl 16(%eax), %esi
+ sbbl 16(%ebp), %esi
+ movl %edi, 8(%ecx)
+ movl 20(%ebp), %edi
+ movl %edx, 12(%ecx)
+ movl 20(%eax), %edx
+ sbbl %edi, %edx
+ movl 24(%ebp), %edi
+ movl %esi, 16(%ecx)
+ movl 24(%eax), %esi
+ sbbl %edi, %esi
+ movl 28(%ebp), %edi
+ movl %edx, 20(%ecx)
+ movl 28(%eax), %edx
+ sbbl %edi, %edx
+ movl 32(%ebp), %edi
+ movl %esi, 24(%ecx)
+ movl 32(%eax), %esi
+ sbbl %edi, %esi
+ movl 36(%ebp), %edi
+ movl %edx, 28(%ecx)
+ movl 36(%eax), %edx
+ sbbl %edi, %edx
+ movl 40(%ebp), %edi
+ movl %esi, 32(%ecx)
+ movl 40(%eax), %esi
+ sbbl %edi, %esi
+ movl 44(%ebp), %edi
+ movl %edx, 36(%ecx)
+ movl 44(%eax), %edx
+ sbbl %edi, %edx
+ movl 48(%ebp), %edi
+ movl %esi, 40(%ecx)
+ movl 48(%eax), %esi
+ sbbl %edi, %esi
+ movl 52(%ebp), %edi
+ movl %edx, 44(%ecx)
+ movl 52(%eax), %edx
+ sbbl %edi, %edx
+ movl 56(%ebp), %edi
+ movl %esi, 48(%ecx)
+ movl 56(%eax), %esi
+ sbbl %edi, %esi
+ movl 60(%ebp), %edi
+ movl %edx, 52(%ecx)
+ movl 60(%eax), %edx
+ sbbl %edi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 64(%ebp), %edx
+ movl %esi, 56(%ecx)
+ movl 64(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 68(%ebp), %edx
+ movl 68(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 72(%ebp), %edx
+ movl 72(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 76(%ebp), %edx
+ movl 76(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 80(%ebp), %edx
+ movl 80(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 84(%ebp), %edx
+ movl 84(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 88(%ebp), %edx
+ movl 88(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 92(%ebp), %edx
+ movl 92(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 96(%ebp), %edx
+ movl 96(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 100(%ebp), %edx
+ movl 100(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 104(%ebp), %edx
+ movl 104(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 108(%ebp), %edx
+ movl 108(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 112(%ebp), %edx
+ movl 112(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 116(%ebp), %edx
+ movl 116(%eax), %eax
+ sbbl %edx, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 132(%esp), %esi
+ jne .LBB237_1
+# BB#2:
+ movl $0, 60(%esp) # 4-byte Folded Spill
+ jmp .LBB237_3
+.LBB237_1:
+ movl 56(%esi), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+.LBB237_3:
+ testb %al, %al
+ jne .LBB237_4
+# BB#5:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ movl $0, %ebx
+ jmp .LBB237_6
+.LBB237_4:
+ movl (%esi), %ebx
+ movl 4(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB237_6:
+ jne .LBB237_7
+# BB#8:
+ movl $0, 32(%esp) # 4-byte Folded Spill
+ jmp .LBB237_9
+.LBB237_7:
+ movl 52(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+.LBB237_9:
+ jne .LBB237_10
+# BB#11:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ jmp .LBB237_12
+.LBB237_10:
+ movl 48(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+.LBB237_12:
+ jne .LBB237_13
+# BB#14:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB237_15
+.LBB237_13:
+ movl 44(%esi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB237_15:
+ jne .LBB237_16
+# BB#17:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB237_18
+.LBB237_16:
+ movl 40(%esi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB237_18:
+ jne .LBB237_19
+# BB#20:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB237_21
+.LBB237_19:
+ movl 36(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB237_21:
+ jne .LBB237_22
+# BB#23:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB237_24
+.LBB237_22:
+ movl 32(%esi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB237_24:
+ jne .LBB237_25
+# BB#26:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB237_27
+.LBB237_25:
+ movl 28(%esi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB237_27:
+ jne .LBB237_28
+# BB#29:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB237_30
+.LBB237_28:
+ movl 24(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB237_30:
+ jne .LBB237_31
+# BB#32:
+ movl $0, %edx
+ jmp .LBB237_33
+.LBB237_31:
+ movl 20(%esi), %edx
+.LBB237_33:
+ jne .LBB237_34
+# BB#35:
+ movl $0, %ebp
+ jmp .LBB237_36
+.LBB237_34:
+ movl 16(%esi), %ebp
+.LBB237_36:
+ jne .LBB237_37
+# BB#38:
+ movl $0, %eax
+ jmp .LBB237_39
+.LBB237_37:
+ movl 12(%esi), %eax
+.LBB237_39:
+ jne .LBB237_40
+# BB#41:
+ xorl %esi, %esi
+ jmp .LBB237_42
+.LBB237_40:
+ movl 8(%esi), %esi
+.LBB237_42:
+ addl 44(%esp), %ebx # 4-byte Folded Reload
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 60(%ecx)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 64(%ecx)
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 68(%ecx)
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, 76(%ecx)
+ movl (%esp), %esi # 4-byte Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 80(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 84(%ecx)
+ movl 8(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 88(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 96(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 100(%ecx)
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 104(%ecx)
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 108(%ecx)
+ movl %eax, 112(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%ecx)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end237:
+ .size mcl_fpDbl_sub15L, .Lfunc_end237-mcl_fpDbl_sub15L
+
+ .align 16, 0x90
+ .type .LmulPv512x32,@function
+.LmulPv512x32: # @mulPv512x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $112, %esp
+ movl %edx, %ebp
+ movl 132(%esp), %ebx
+ movl %ebx, %eax
+ mull 60(%ebp)
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 56(%ebp)
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 52(%ebp)
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 48(%ebp)
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 44(%ebp)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 40(%ebp)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 36(%ebp)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 32(%ebp)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 28(%ebp)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 24(%ebp)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 20(%ebp)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 16(%ebp)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 12(%ebp)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 8(%ebp)
+ movl %edx, %esi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 4(%ebp)
+ movl %edx, %edi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull (%ebp)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%ecx)
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%ecx)
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%ecx)
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 64(%ecx)
+ movl %ecx, %eax
+ addl $112, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end238:
+ .size .LmulPv512x32, .Lfunc_end238-.LmulPv512x32
+
+ .globl mcl_fp_mulUnitPre16L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre16L,@function
+mcl_fp_mulUnitPre16L: # @mcl_fp_mulUnitPre16L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $124, %esp
+ calll .L239$pb
+.L239$pb:
+ popl %ebx
+.Ltmp50:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp50-.L239$pb), %ebx
+ movl 152(%esp), %eax
+ movl %eax, (%esp)
+ leal 56(%esp), %ecx
+ movl 148(%esp), %edx
+ calll .LmulPv512x32
+ movl 120(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 108(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 104(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp
+ movl 72(%esp), %ebx
+ movl 68(%esp), %edi
+ movl 64(%esp), %esi
+ movl 56(%esp), %edx
+ movl 60(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 64(%eax)
+ addl $124, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end239:
+ .size mcl_fp_mulUnitPre16L, .Lfunc_end239-mcl_fp_mulUnitPre16L
+
+ .globl mcl_fpDbl_mulPre16L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre16L,@function
+mcl_fpDbl_mulPre16L: # @mcl_fpDbl_mulPre16L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $300, %esp # imm = 0x12C
+ calll .L240$pb
+.L240$pb:
+ popl %ebx
+.Ltmp51:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp51-.L240$pb), %ebx
+ movl %ebx, -224(%ebp) # 4-byte Spill
+ movl 16(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl 12(%ebp), %esi
+ movl %esi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre8L@PLT
+ leal 32(%edi), %eax
+ movl %eax, 8(%esp)
+ leal 32(%esi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 64(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre8L@PLT
+ movl 52(%esi), %ebx
+ movl 48(%esi), %eax
+ movl 44(%esi), %ecx
+ movl 40(%esi), %edx
+ movl %edx, -176(%ebp) # 4-byte Spill
+ movl (%esi), %edi
+ movl 4(%esi), %edx
+ addl 32(%esi), %edi
+ movl %edi, -184(%ebp) # 4-byte Spill
+ movl %esi, %edi
+ adcl 36(%edi), %edx
+ movl %edx, -236(%ebp) # 4-byte Spill
+ movl -176(%ebp), %edx # 4-byte Reload
+ adcl 8(%edi), %edx
+ movl %edx, -176(%ebp) # 4-byte Spill
+ adcl 12(%edi), %ecx
+ movl %ecx, -232(%ebp) # 4-byte Spill
+ adcl 16(%edi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ adcl 20(%edi), %ebx
+ movl %ebx, -228(%ebp) # 4-byte Spill
+ movl 56(%edi), %eax
+ adcl 24(%edi), %eax
+ movl %eax, -248(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %ecx
+ popl %eax
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl (%esi), %ecx
+ addl 32(%esi), %ecx
+ movl %ecx, -188(%ebp) # 4-byte Spill
+ movl 4(%esi), %ecx
+ adcl 36(%esi), %ecx
+ movl %ecx, -192(%ebp) # 4-byte Spill
+ movl 40(%esi), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, -196(%ebp) # 4-byte Spill
+ movl 44(%esi), %ecx
+ adcl 12(%esi), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ movl 48(%esi), %ecx
+ adcl 16(%esi), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ movl 52(%esi), %ecx
+ adcl 20(%esi), %ecx
+ movl %ecx, -208(%ebp) # 4-byte Spill
+ movl 56(%esi), %edx
+ adcl 24(%esi), %edx
+ movl 60(%esi), %ecx
+ adcl 28(%esi), %ecx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %ebx
+ popl %eax
+ movl %ebx, -252(%ebp) # 4-byte Spill
+ movl -212(%ebp), %ebx # 4-byte Reload
+ movl -176(%ebp), %esi # 4-byte Reload
+ movl %esi, -216(%ebp) # 4-byte Spill
+ movl -184(%ebp), %esi # 4-byte Reload
+ movl %esi, -220(%ebp) # 4-byte Spill
+ jb .LBB240_2
+# BB#1:
+ xorl %eax, %eax
+ xorl %ebx, %ebx
+ movl $0, -216(%ebp) # 4-byte Folded Spill
+ movl $0, -220(%ebp) # 4-byte Folded Spill
+.LBB240_2:
+ movl %ebx, -244(%ebp) # 4-byte Spill
+ movl %eax, -240(%ebp) # 4-byte Spill
+ movl 60(%edi), %eax
+ movl -144(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 28(%edi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl %edx, -144(%ebp) # 4-byte Spill
+ movl -208(%ebp), %eax # 4-byte Reload
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl -204(%ebp), %eax # 4-byte Reload
+ movl %eax, -152(%ebp) # 4-byte Spill
+ movl -200(%ebp), %eax # 4-byte Reload
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl -196(%ebp), %eax # 4-byte Reload
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl -192(%ebp), %eax # 4-byte Reload
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl -188(%ebp), %eax # 4-byte Reload
+ movl %eax, -168(%ebp) # 4-byte Spill
+ jb .LBB240_4
+# BB#3:
+ movl $0, -172(%ebp) # 4-byte Folded Spill
+ movl $0, -144(%ebp) # 4-byte Folded Spill
+ movl $0, -148(%ebp) # 4-byte Folded Spill
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+ movl $0, -156(%ebp) # 4-byte Folded Spill
+ movl $0, -160(%ebp) # 4-byte Folded Spill
+ movl $0, -164(%ebp) # 4-byte Folded Spill
+ movl $0, -168(%ebp) # 4-byte Folded Spill
+.LBB240_4:
+ movl -184(%ebp), %eax # 4-byte Reload
+ movl %eax, -108(%ebp)
+ movl -236(%ebp), %eax # 4-byte Reload
+ movl %eax, -104(%ebp)
+ movl -176(%ebp), %edi # 4-byte Reload
+ movl %edi, -100(%ebp)
+ movl -232(%ebp), %edi # 4-byte Reload
+ movl %edi, -96(%ebp)
+ movl -212(%ebp), %esi # 4-byte Reload
+ movl %esi, -92(%ebp)
+ movl -228(%ebp), %esi # 4-byte Reload
+ movl %esi, -88(%ebp)
+ movl -248(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -84(%ebp)
+ movl -188(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -140(%ebp)
+ movl -192(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -136(%ebp)
+ movl -196(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -132(%ebp)
+ movl -200(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -128(%ebp)
+ movl -204(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -124(%ebp)
+ movl -208(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -120(%ebp)
+ movl %esi, %ebx
+ movl %edi, %esi
+ movl %eax, %edi
+ movl %edx, -116(%ebp)
+ movl %ecx, -112(%ebp)
+ sbbl %edx, %edx
+ movl -180(%ebp), %eax # 4-byte Reload
+ movl %eax, -80(%ebp)
+ movl -252(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB240_6
+# BB#5:
+ movl $0, %eax
+ movl $0, %ebx
+ movl $0, %esi
+ movl $0, %edi
+.LBB240_6:
+ movl %eax, -180(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+ leal -140(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -108(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -76(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl -220(%ebp), %eax # 4-byte Reload
+ addl %eax, -168(%ebp) # 4-byte Folded Spill
+ adcl %edi, -164(%ebp) # 4-byte Folded Spill
+ movl -216(%ebp), %eax # 4-byte Reload
+ adcl %eax, -160(%ebp) # 4-byte Folded Spill
+ adcl %esi, -156(%ebp) # 4-byte Folded Spill
+ movl -244(%ebp), %eax # 4-byte Reload
+ adcl %eax, -152(%ebp) # 4-byte Folded Spill
+ adcl %ebx, -148(%ebp) # 4-byte Folded Spill
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -240(%ebp), %eax # 4-byte Folded Reload
+ movl %eax, -144(%ebp) # 4-byte Spill
+ movl -172(%ebp), %edi # 4-byte Reload
+ adcl -180(%ebp), %edi # 4-byte Folded Reload
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -176(%ebp) # 4-byte Spill
+ movl -224(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre8L@PLT
+ movl -168(%ebp), %eax # 4-byte Reload
+ addl -44(%ebp), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl -164(%ebp), %eax # 4-byte Reload
+ adcl -40(%ebp), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -36(%ebp), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl -32(%ebp), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl -152(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ adcl -16(%ebp), %edi
+ movl %edi, -172(%ebp) # 4-byte Spill
+ adcl %esi, -176(%ebp) # 4-byte Folded Spill
+ movl -76(%ebp), %eax
+ movl 8(%ebp), %esi
+ subl (%esi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ movl -72(%ebp), %ecx
+ sbbl 4(%esi), %ecx
+ movl -68(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -192(%ebp) # 4-byte Spill
+ movl -64(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl -60(%ebp), %ebx
+ sbbl 16(%esi), %ebx
+ movl -56(%ebp), %eax
+ sbbl 20(%esi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ movl -52(%ebp), %eax
+ sbbl 24(%esi), %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ movl -48(%ebp), %eax
+ sbbl 28(%esi), %eax
+ movl %eax, -188(%ebp) # 4-byte Spill
+ movl 32(%esi), %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ sbbl %eax, -168(%ebp) # 4-byte Folded Spill
+ movl 36(%esi), %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ sbbl %eax, -156(%ebp) # 4-byte Folded Spill
+ movl 48(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl 52(%esi), %eax
+ movl %eax, -220(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 56(%esi), %eax
+ movl %eax, -224(%ebp) # 4-byte Spill
+ movl -144(%ebp), %edi # 4-byte Reload
+ sbbl %eax, %edi
+ movl 60(%esi), %eax
+ movl %eax, -228(%ebp) # 4-byte Spill
+ sbbl %eax, -172(%ebp) # 4-byte Folded Spill
+ sbbl $0, -176(%ebp) # 4-byte Folded Spill
+ movl 64(%esi), %eax
+ movl %eax, -260(%ebp) # 4-byte Spill
+ subl %eax, -196(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -264(%ebp) # 4-byte Spill
+ sbbl %eax, %ecx
+ movl 72(%esi), %eax
+ movl %eax, -268(%ebp) # 4-byte Spill
+ sbbl %eax, -192(%ebp) # 4-byte Folded Spill
+ movl 76(%esi), %eax
+ movl %eax, -272(%ebp) # 4-byte Spill
+ sbbl %eax, %edx
+ movl 80(%esi), %eax
+ movl %eax, -276(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 84(%esi), %eax
+ movl %eax, -280(%ebp) # 4-byte Spill
+ sbbl %eax, -180(%ebp) # 4-byte Folded Spill
+ movl 88(%esi), %eax
+ movl %eax, -284(%ebp) # 4-byte Spill
+ sbbl %eax, -184(%ebp) # 4-byte Folded Spill
+ movl 92(%esi), %eax
+ movl %eax, -288(%ebp) # 4-byte Spill
+ sbbl %eax, -188(%ebp) # 4-byte Folded Spill
+ movl 96(%esi), %eax
+ movl %eax, -292(%ebp) # 4-byte Spill
+ sbbl %eax, -168(%ebp) # 4-byte Folded Spill
+ movl 100(%esi), %eax
+ movl %eax, -236(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 104(%esi), %eax
+ movl %eax, -240(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 108(%esi), %eax
+ movl %eax, -244(%ebp) # 4-byte Spill
+ sbbl %eax, -156(%ebp) # 4-byte Folded Spill
+ movl 112(%esi), %eax
+ movl %eax, -248(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl 116(%esi), %eax
+ movl %eax, -252(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 120(%esi), %eax
+ movl %eax, -232(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl %edi, -144(%ebp) # 4-byte Spill
+ movl 124(%esi), %eax
+ movl %eax, -256(%ebp) # 4-byte Spill
+ sbbl %eax, -172(%ebp) # 4-byte Folded Spill
+ movl -176(%ebp), %edi # 4-byte Reload
+ sbbl $0, %edi
+ movl -196(%ebp), %eax # 4-byte Reload
+ addl -200(%ebp), %eax # 4-byte Folded Reload
+ adcl -204(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%esi)
+ movl -192(%ebp), %eax # 4-byte Reload
+ adcl -208(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%esi)
+ adcl -212(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 40(%esi)
+ adcl -216(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 44(%esi)
+ movl -180(%ebp), %eax # 4-byte Reload
+ adcl -220(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 48(%esi)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ adcl -224(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 52(%esi)
+ movl -188(%ebp), %edx # 4-byte Reload
+ adcl -228(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 56(%esi)
+ movl -168(%ebp), %eax # 4-byte Reload
+ adcl -260(%ebp), %eax # 4-byte Folded Reload
+ movl %edx, 60(%esi)
+ movl -164(%ebp), %ecx # 4-byte Reload
+ adcl -264(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 64(%esi)
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -268(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 68(%esi)
+ movl -156(%ebp), %ecx # 4-byte Reload
+ adcl -272(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 72(%esi)
+ movl -152(%ebp), %eax # 4-byte Reload
+ adcl -276(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 76(%esi)
+ movl -148(%ebp), %ecx # 4-byte Reload
+ adcl -280(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 80(%esi)
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -284(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 84(%esi)
+ movl -172(%ebp), %ecx # 4-byte Reload
+ adcl -288(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 88(%esi)
+ adcl -292(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 92(%esi)
+ movl %edi, 96(%esi)
+ movl -236(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 100(%esi)
+ movl -240(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 104(%esi)
+ movl -244(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 108(%esi)
+ movl -248(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 112(%esi)
+ movl -252(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 116(%esi)
+ movl -232(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 120(%esi)
+ movl -256(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 124(%esi)
+ addl $300, %esp # imm = 0x12C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end240:
+ .size mcl_fpDbl_mulPre16L, .Lfunc_end240-mcl_fpDbl_mulPre16L
+
+ .globl mcl_fpDbl_sqrPre16L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre16L,@function
+mcl_fpDbl_sqrPre16L: # @mcl_fpDbl_sqrPre16L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $300, %esp # imm = 0x12C
+ calll .L241$pb
+.L241$pb:
+ popl %ebx
+.Ltmp52:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp52-.L241$pb), %ebx
+ movl %ebx, -184(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre8L@PLT
+ leal 32(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 64(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre8L@PLT
+ movl 52(%edi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ movl 48(%edi), %eax
+ movl 44(%edi), %ebx
+ movl 40(%edi), %esi
+ movl (%edi), %ecx
+ movl 4(%edi), %edx
+ addl 32(%edi), %ecx
+ movl %ecx, -192(%ebp) # 4-byte Spill
+ adcl 36(%edi), %edx
+ movl %edx, -196(%ebp) # 4-byte Spill
+ adcl 8(%edi), %esi
+ movl %esi, -188(%ebp) # 4-byte Spill
+ adcl 12(%edi), %ebx
+ adcl 16(%edi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ movl -180(%ebp), %eax # 4-byte Reload
+ adcl 20(%edi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ addl %ecx, %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ adcl %edx, %edx
+ movl %edx, -160(%ebp) # 4-byte Spill
+ adcl %esi, %esi
+ movl %esi, -156(%ebp) # 4-byte Spill
+ movl %ebx, %edx
+ movl %ebx, %esi
+ adcl %edx, %edx
+ movl %edx, -152(%ebp) # 4-byte Spill
+ movl -208(%ebp), %eax # 4-byte Reload
+ movl %eax, %edx
+ movl %eax, %ebx
+ adcl %edx, %edx
+ movl %edx, -148(%ebp) # 4-byte Spill
+ movl -180(%ebp), %edx # 4-byte Reload
+ adcl %edx, %edx
+ movl %edx, -144(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl 56(%edi), %edx
+ movl -168(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ adcl 24(%edi), %edx
+ movl 60(%edi), %ecx
+ adcl 28(%edi), %ecx
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edi
+ sbbl %eax, %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ jb .LBB241_2
+# BB#1:
+ movl $0, -144(%ebp) # 4-byte Folded Spill
+ movl $0, -148(%ebp) # 4-byte Folded Spill
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+ movl $0, -156(%ebp) # 4-byte Folded Spill
+ movl $0, -160(%ebp) # 4-byte Folded Spill
+ movl $0, -164(%ebp) # 4-byte Folded Spill
+.LBB241_2:
+ movl %edx, %eax
+ movl -172(%ebp), %edi # 4-byte Reload
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl %eax, %eax
+ movl %ecx, %edi
+ adcl %edi, %edi
+ movl %edi, -176(%ebp) # 4-byte Spill
+ movl -204(%ebp), %edi # 4-byte Reload
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB241_4
+# BB#3:
+ movl $0, -176(%ebp) # 4-byte Folded Spill
+ xorl %eax, %eax
+.LBB241_4:
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl -192(%ebp), %eax # 4-byte Reload
+ movl %eax, -108(%ebp)
+ movl %eax, -140(%ebp)
+ movl -196(%ebp), %eax # 4-byte Reload
+ movl %eax, -104(%ebp)
+ movl %eax, -136(%ebp)
+ movl -188(%ebp), %eax # 4-byte Reload
+ movl %eax, -100(%ebp)
+ movl %eax, -132(%ebp)
+ movl %esi, -96(%ebp)
+ movl %esi, -128(%ebp)
+ movl %ebx, -92(%ebp)
+ movl %ebx, -124(%ebp)
+ movl -180(%ebp), %eax # 4-byte Reload
+ movl %eax, -88(%ebp)
+ movl %eax, -120(%ebp)
+ movl %edx, -84(%ebp)
+ movl %edx, -116(%ebp)
+ movl %ecx, -80(%ebp)
+ movl %ecx, -112(%ebp)
+ movl -200(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB241_5
+# BB#6:
+ xorl %edi, %edi
+ jmp .LBB241_7
+.LBB241_5:
+ shrl $31, %ecx
+ movl %ecx, %edi
+.LBB241_7:
+ leal -140(%ebp), %eax
+ movl %eax, 8(%esp)
+ leal -108(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -76(%ebp), %eax
+ movl %eax, (%esp)
+ movl -168(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -184(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre8L@PLT
+ movl -164(%ebp), %eax # 4-byte Reload
+ addl -44(%ebp), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -40(%ebp), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl -36(%ebp), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl -152(%ebp), %eax # 4-byte Reload
+ adcl -32(%ebp), %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ movl -172(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl -176(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ adcl %edi, %esi
+ movl %esi, -168(%ebp) # 4-byte Spill
+ movl -76(%ebp), %ecx
+ movl 8(%ebp), %esi
+ subl (%esi), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ movl -72(%ebp), %edi
+ sbbl 4(%esi), %edi
+ movl -68(%ebp), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, -184(%ebp) # 4-byte Spill
+ movl -64(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, -192(%ebp) # 4-byte Spill
+ movl -60(%ebp), %ebx
+ sbbl 16(%esi), %ebx
+ movl %eax, %ecx
+ movl -56(%ebp), %eax
+ sbbl 20(%esi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ movl -52(%ebp), %edx
+ sbbl 24(%esi), %edx
+ movl %edx, -188(%ebp) # 4-byte Spill
+ movl -48(%ebp), %edx
+ sbbl 28(%esi), %edx
+ movl 32(%esi), %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 36(%esi), %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ sbbl %eax, -156(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl 48(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 52(%esi), %eax
+ movl %eax, -220(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 56(%esi), %eax
+ movl %eax, -224(%ebp) # 4-byte Spill
+ sbbl %eax, -172(%ebp) # 4-byte Folded Spill
+ movl 60(%esi), %eax
+ movl %eax, -228(%ebp) # 4-byte Spill
+ sbbl %eax, %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ movl -168(%ebp), %eax # 4-byte Reload
+ sbbl $0, %eax
+ movl 64(%esi), %ecx
+ movl %ecx, -260(%ebp) # 4-byte Spill
+ subl %ecx, -180(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %ecx
+ movl %ecx, -264(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 72(%esi), %ecx
+ movl %ecx, -268(%ebp) # 4-byte Spill
+ sbbl %ecx, -184(%ebp) # 4-byte Folded Spill
+ movl 76(%esi), %ecx
+ movl %ecx, -272(%ebp) # 4-byte Spill
+ sbbl %ecx, -192(%ebp) # 4-byte Folded Spill
+ movl 80(%esi), %ecx
+ movl %ecx, -276(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 84(%esi), %ecx
+ movl %ecx, -280(%ebp) # 4-byte Spill
+ sbbl %ecx, -196(%ebp) # 4-byte Folded Spill
+ movl 88(%esi), %ecx
+ movl %ecx, -284(%ebp) # 4-byte Spill
+ sbbl %ecx, -188(%ebp) # 4-byte Folded Spill
+ movl 92(%esi), %ecx
+ movl %ecx, -288(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 96(%esi), %ecx
+ movl %ecx, -292(%ebp) # 4-byte Spill
+ sbbl %ecx, -164(%ebp) # 4-byte Folded Spill
+ movl 100(%esi), %ecx
+ movl %ecx, -232(%ebp) # 4-byte Spill
+ sbbl %ecx, -160(%ebp) # 4-byte Folded Spill
+ movl 104(%esi), %ecx
+ movl %ecx, -236(%ebp) # 4-byte Spill
+ sbbl %ecx, -156(%ebp) # 4-byte Folded Spill
+ movl 108(%esi), %ecx
+ movl %ecx, -240(%ebp) # 4-byte Spill
+ sbbl %ecx, -152(%ebp) # 4-byte Folded Spill
+ movl 112(%esi), %ecx
+ movl %ecx, -244(%ebp) # 4-byte Spill
+ sbbl %ecx, -148(%ebp) # 4-byte Folded Spill
+ movl 116(%esi), %ecx
+ movl %ecx, -248(%ebp) # 4-byte Spill
+ sbbl %ecx, -144(%ebp) # 4-byte Folded Spill
+ movl 120(%esi), %ecx
+ movl %ecx, -252(%ebp) # 4-byte Spill
+ sbbl %ecx, -172(%ebp) # 4-byte Folded Spill
+ movl 124(%esi), %ecx
+ movl %ecx, -256(%ebp) # 4-byte Spill
+ sbbl %ecx, -176(%ebp) # 4-byte Folded Spill
+ sbbl $0, %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl -180(%ebp), %eax # 4-byte Reload
+ addl -200(%ebp), %eax # 4-byte Folded Reload
+ adcl -204(%ebp), %edi # 4-byte Folded Reload
+ movl %eax, 32(%esi)
+ movl -184(%ebp), %eax # 4-byte Reload
+ adcl -208(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 36(%esi)
+ movl -192(%ebp), %ecx # 4-byte Reload
+ adcl -212(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%esi)
+ adcl -216(%ebp), %ebx # 4-byte Folded Reload
+ movl %ecx, 44(%esi)
+ movl -196(%ebp), %ecx # 4-byte Reload
+ adcl -220(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 48(%esi)
+ movl -188(%ebp), %eax # 4-byte Reload
+ adcl -224(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl %edx, %ecx
+ adcl -228(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 56(%esi)
+ movl -164(%ebp), %eax # 4-byte Reload
+ adcl -260(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 60(%esi)
+ movl -160(%ebp), %ecx # 4-byte Reload
+ adcl -264(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 64(%esi)
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl -268(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 68(%esi)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -272(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 72(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -276(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 76(%esi)
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl -280(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 80(%esi)
+ movl -172(%ebp), %eax # 4-byte Reload
+ adcl -284(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 84(%esi)
+ movl -176(%ebp), %ecx # 4-byte Reload
+ adcl -288(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 88(%esi)
+ movl -168(%ebp), %eax # 4-byte Reload
+ adcl -292(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 92(%esi)
+ movl %eax, 96(%esi)
+ movl -232(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 100(%esi)
+ movl -236(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 104(%esi)
+ movl -240(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 108(%esi)
+ movl -244(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 112(%esi)
+ movl -248(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 116(%esi)
+ movl -252(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 120(%esi)
+ movl -256(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 124(%esi)
+ addl $300, %esp # imm = 0x12C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end241:
+ .size mcl_fpDbl_sqrPre16L, .Lfunc_end241-mcl_fpDbl_sqrPre16L
+
+ .globl mcl_fp_mont16L
+ .align 16, 0x90
+ .type mcl_fp_mont16L,@function
+mcl_fp_mont16L: # @mcl_fp_mont16L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2428, %esp # imm = 0x97C
+ calll .L242$pb
+.L242$pb:
+ popl %ebx
+.Ltmp53:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp53-.L242$pb), %ebx
+ movl 2460(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 2360(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 2360(%esp), %ebp
+ movl 2364(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 2424(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 2420(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 2416(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 2412(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2408(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2404(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2400(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2396(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2392(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2388(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2384(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 2380(%esp), %edi
+ movl 2376(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2372(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2368(%esp), %esi
+ movl %eax, (%esp)
+ leal 2288(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ addl 2288(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2292(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 2296(%esp), %esi
+ movl %esi, %ebp
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2300(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2304(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2308(%esp), %edi
+ movl %edi, %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2312(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2316(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2320(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2328(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2332(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2340(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2344(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2348(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2352(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 2456(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 2216(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %edi
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 2216(%esp), %ecx
+ adcl 2220(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2224(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2228(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2232(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 2236(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2244(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2252(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2256(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2260(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2268(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2272(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2276(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 2280(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2144(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %edi
+ addl 2144(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2148(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2152(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2156(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2160(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 2164(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 2168(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2176(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2180(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2184(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2188(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2192(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2196(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2200(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2204(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 2208(%esp), %esi
+ adcl $0, %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 2072(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 2072(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2076(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2080(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2084(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2088(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 2092(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2096(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2100(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2104(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 2108(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2112(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 2116(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2120(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2124(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2128(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 2132(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2136(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2000(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 2000(%esp), %ecx
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 2004(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2008(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2012(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 2016(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2020(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2024(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2028(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2032(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 2036(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2040(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 2044(%esp), %edi
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 2048(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 2052(%esp), %ebp
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl 2056(%esp), %esi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 2060(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2064(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1928(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 1928(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1932(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1936(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1944(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1948(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1956(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1960(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1964(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1968(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1972(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 1976(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl 1980(%esp), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1984(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1988(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1992(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1856(%esp), %ecx
+ movl 2460(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 1856(%esp), %esi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1860(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1864(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1868(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1872(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1876(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1880(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1884(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1888(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 1892(%esp), %esi
+ adcl 1896(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 1900(%esp), %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1904(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1908(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1912(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1916(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1920(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1784(%esp), %ecx
+ movl 2452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 1784(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1804(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1816(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1820(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1824(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1836(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1840(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1844(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1848(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1712(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1712(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1716(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1720(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1724(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1728(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1732(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1736(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1740(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1756(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1764(%esp), %ebp
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1768(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1772(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1776(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1640(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1640(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1644(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1656(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1664(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1668(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1680(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 1688(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ adcl 1692(%esp), %esi
+ movl %esi, %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1696(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1700(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1704(%esp), %esi
+ sbbl %eax, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1568(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 80(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1568(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1572(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1576(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1580(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1584(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1588(%esp), %ebp
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1592(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1596(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1600(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1604(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1608(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1612(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1616(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 1620(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1624(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1628(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 1632(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1496(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1496(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1500(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1504(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1512(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1516(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1544(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1548(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1424(%esp), %ecx
+ movl 2460(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ andl $1, %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 1424(%esp), %eax
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1432(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1440(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1444(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1448(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 1472(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1480(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1484(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1488(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 2456(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1352(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 1352(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1396(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 1404(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1412(%esp), %esi
+ adcl 1416(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1280(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 1280(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1284(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1288(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1296(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1300(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1304(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1308(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1312(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1316(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1320(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1328(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1336(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 1340(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1344(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 2456(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 2452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 1208(%esp), %ecx
+ adcl 1212(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1244(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1260(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1264(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1272(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1136(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 1136(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %edi # 4-byte Reload
+ adcl 1164(%esp), %edi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1188(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1192(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 1064(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 1064(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 1088(%esp), %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl 1092(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1116(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 992(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 996(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1000(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1004(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1008(%esp), %edi
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 1012(%esp), %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1016(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ adcl 1020(%esp), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1024(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1028(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 1032(%esp), %esi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1036(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1040(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1044(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1048(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1052(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1056(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 920(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 920(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 932(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ adcl 936(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 956(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 968(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 848(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 848(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 856(%esp), %edi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %esi # 4-byte Reload
+ adcl 868(%esp), %esi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 896(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 776(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 776(%esp), %ecx
+ adcl 780(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 784(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 792(%esp), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 800(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 704(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 712(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 728(%esp), %esi
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 732(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 752(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 92(%esp), %ecx # 4-byte Reload
+ addl 632(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 652(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl 656(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 664(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 676(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 680(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 92(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 560(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl 576(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 592(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 608(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 612(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 488(%esp), %ecx
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 500(%esp), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 508(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 516(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 520(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 536(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 416(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 432(%esp), %edi
+ adcl 436(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 440(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 448(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 116(%esp), %ecx # 4-byte Reload
+ addl 344(%esp), %ecx
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 348(%esp), %ebp
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 356(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 360(%esp), %edi
+ adcl 364(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 116(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 272(%esp), %esi
+ adcl 276(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 288(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 296(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 308(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 120(%esp), %ecx # 4-byte Reload
+ addl 200(%esp), %ecx
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 212(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 220(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 232(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 244(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 2460(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ andl $1, %ebp
+ addl 128(%esp), %esi
+ movl 104(%esp), %ebx # 4-byte Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 140(%esp), %ebx
+ movl %ebx, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 148(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 152(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 156(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 160(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 164(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 168(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 172(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 176(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 180(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 184(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 188(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %edx # 4-byte Reload
+ adcl 192(%esp), %edx
+ movl %edx, 116(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %eax, %edx
+ movl 2460(%esp), %edi
+ subl (%edi), %edx
+ movl %ecx, %eax
+ sbbl 4(%edi), %eax
+ movl %ebx, %ecx
+ sbbl 8(%edi), %ecx
+ movl 112(%esp), %ebx # 4-byte Reload
+ sbbl 12(%edi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx # 4-byte Reload
+ sbbl 16(%edi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 20(%edi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ sbbl 24(%edi), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ sbbl 28(%edi), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ sbbl 32(%edi), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ sbbl 36(%edi), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ sbbl 40(%edi), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ sbbl 44(%edi), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ sbbl 48(%edi), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ sbbl 52(%edi), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ sbbl 56(%edi), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ sbbl 60(%edi), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %edi # 4-byte Reload
+ sbbl $0, %ebp
+ andl $1, %ebp
+ movl %ebp, %ebx
+ jne .LBB242_2
+# BB#1:
+ movl %edx, %edi
+.LBB242_2:
+ movl 2448(%esp), %edx
+ movl %edi, (%edx)
+ testb %bl, %bl
+ movl 108(%esp), %edi # 4-byte Reload
+ jne .LBB242_4
+# BB#3:
+ movl %eax, %edi
+.LBB242_4:
+ movl %edi, 4(%edx)
+ jne .LBB242_6
+# BB#5:
+ movl %ecx, 104(%esp) # 4-byte Spill
+.LBB242_6:
+ movl 104(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%edx)
+ jne .LBB242_8
+# BB#7:
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+.LBB242_8:
+ movl 112(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edx)
+ movl 100(%esp), %eax # 4-byte Reload
+ jne .LBB242_10
+# BB#9:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB242_10:
+ movl %eax, 16(%edx)
+ movl 88(%esp), %eax # 4-byte Reload
+ jne .LBB242_12
+# BB#11:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB242_12:
+ movl %eax, 20(%edx)
+ jne .LBB242_14
+# BB#13:
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+.LBB242_14:
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ jne .LBB242_16
+# BB#15:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB242_16:
+ movl %eax, 28(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ jne .LBB242_18
+# BB#17:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB242_18:
+ movl %eax, 32(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB242_20
+# BB#19:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB242_20:
+ movl %eax, 36(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ jne .LBB242_22
+# BB#21:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB242_22:
+ movl %eax, 40(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ jne .LBB242_24
+# BB#23:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB242_24:
+ movl %eax, 44(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ jne .LBB242_26
+# BB#25:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB242_26:
+ movl %eax, 48(%edx)
+ movl 92(%esp), %eax # 4-byte Reload
+ jne .LBB242_28
+# BB#27:
+ movl 52(%esp), %eax # 4-byte Reload
+.LBB242_28:
+ movl %eax, 52(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ jne .LBB242_30
+# BB#29:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB242_30:
+ movl %eax, 56(%edx)
+ movl 116(%esp), %eax # 4-byte Reload
+ jne .LBB242_32
+# BB#31:
+ movl 120(%esp), %eax # 4-byte Reload
+.LBB242_32:
+ movl %eax, 60(%edx)
+ addl $2428, %esp # imm = 0x97C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end242:
+ .size mcl_fp_mont16L, .Lfunc_end242-mcl_fp_mont16L
+
+ .globl mcl_fp_montNF16L
+ .align 16, 0x90
+ .type mcl_fp_montNF16L,@function
+mcl_fp_montNF16L: # @mcl_fp_montNF16L
+# BB#0:
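+# Descriptive annotation (assumption from the code structure, not part of the
+# generated output): mcl_fp_montNF16L appears to be the "NF" variant of
+# Montgomery multiplication for a 16 x 32-bit limb field. For each word of
+# the multiplier at 2440(%esp) it calls .LmulPv512x32 to fold x*word into the
+# accumulator, multiplies the low limb by the constant read from -4(p)
+# (p at 2444(%esp), constant spilled to 44(%esp)), calls .LmulPv512x32 again
+# with p to cancel that limb, and shifts the accumulator down via adcl chains.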
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2412, %esp # imm = 0x96C
+ calll .L243$pb
+.L243$pb:
+ popl %ebx
+.Ltmp54:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp54-.L243$pb), %ebx
+ movl 2444(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 2344(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 2344(%esp), %edi
+ movl 2348(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 2408(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2404(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2400(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2396(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2392(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2388(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2384(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 2380(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 2376(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 2372(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 2368(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2364(%esp), %ebp
+ movl 2360(%esp), %esi
+ movl 2356(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2352(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 2272(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 2272(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2276(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2280(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2284(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 2288(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 2292(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2296(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2300(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 2304(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 2308(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2312(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2316(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2320(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2324(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 2328(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2332(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2336(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 2200(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 2264(%esp), %edx
+ movl 108(%esp), %ecx # 4-byte Reload
+ addl 2200(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2204(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2208(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 2216(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2224(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 2228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 2232(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 2236(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2244(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2248(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 2252(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 2260(%esp), %esi
+ adcl $0, %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2128(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 2128(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2132(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2136(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2140(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2144(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2148(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2152(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 2156(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2160(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 2164(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2172(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2176(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2180(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2184(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 2188(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 2192(%esp), %esi
+ movl 2440(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 2056(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 2120(%esp), %eax
+ movl 84(%esp), %edx # 4-byte Reload
+ addl 2056(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2060(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2064(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2068(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2072(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 2076(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 2080(%esp), %edi
+ movl %edi, %ebp
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 2084(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 2088(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2092(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2096(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 2100(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2104(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2108(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 2112(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 2116(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1984(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1984(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1988(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1992(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1996(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2000(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 2004(%esp), %edi
+ adcl 2008(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2012(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2016(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2020(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2024(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2028(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2032(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 2036(%esp), %ebp
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 2040(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2044(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2048(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1912(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1976(%esp), %eax
+ movl 76(%esp), %edx # 4-byte Reload
+ addl 1912(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1916(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1920(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1924(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1928(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1932(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1936(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1940(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1944(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 1948(%esp), %edi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1952(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1956(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1960(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ adcl 1964(%esp), %esi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1968(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1972(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1840(%esp), %ecx
+ movl 2444(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ addl 1840(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1844(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1848(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1852(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1856(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1864(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1868(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1872(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1876(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1880(%esp), %edi
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1884(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1888(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1892(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1896(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1900(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1904(%esp), %esi
+ movl 2440(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1768(%esp), %ecx
+ movl 2436(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ movl 1832(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1768(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1772(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1776(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1784(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1804(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ adcl 1808(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1820(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1828(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1696(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1696(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1700(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1704(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1708(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1712(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1716(%esp), %ebp
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1720(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1724(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1728(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1732(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1736(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1740(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1744(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1756(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1624(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1688(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 1624(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1628(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1632(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1636(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1640(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 1644(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 1648(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1656(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1664(%esp), %esi
+ movl %esi, %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1680(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1552(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1552(%esp), %esi
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1556(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1568(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1576(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1580(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1584(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1588(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1592(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1596(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1600(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1604(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1608(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1616(%esp), %edi
+ movl 2440(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1480(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1544(%esp), %eax
+ addl 1480(%esp), %esi
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 1484(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 1488(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1492(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1496(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 1500(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1504(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 1508(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1512(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1516(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1520(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ adcl 1524(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1528(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1532(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1536(%esp), %ebp
+ adcl 1540(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl $0, %edi
+ movl %esi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1408(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1408(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 1416(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1432(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1440(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1444(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1464(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1468(%esp), %ebp
+ adcl 1472(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1336(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1400(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1336(%esp), %ecx
+ adcl 1340(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1344(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1348(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 1352(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1356(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 1360(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1364(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1368(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1372(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 1376(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 1380(%esp), %edi
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1384(%esp), %esi
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1388(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 1392(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 1396(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1264(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1308(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ adcl 1312(%esp), %esi
+ movl %esi, %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1320(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1324(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 1192(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1256(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 1192(%esp), %ecx
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1196(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1200(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1204(%esp), %esi
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1208(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1212(%esp), %edi
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1216(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1220(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1224(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 1228(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1232(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ adcl 1236(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1240(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 1244(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 1248(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 1252(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1120(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1120(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1132(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1140(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1144(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1176(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 1048(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1112(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 1048(%esp), %ecx
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 1052(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1068(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1076(%esp), %ebp
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1100(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 976(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 976(%esp), %edi
+ adcl 980(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 1000(%esp), %edi
+ adcl 1004(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1008(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1016(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 904(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 968(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 904(%esp), %eax
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 908(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 912(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 916(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 920(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 924(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 928(%esp), %edi
+ adcl 932(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 936(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ adcl 940(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 944(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 948(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 952(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 956(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 960(%esp), %ebp
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 964(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl %eax, %esi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 832(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 832(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 856(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 872(%esp), %esi
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 876(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 888(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 892(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 824(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 760(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 796(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 800(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 808(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 816(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 688(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 716(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 732(%esp), %ebp
+ adcl 736(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 616(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 680(%esp), %edx
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 616(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 624(%esp), %edi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 640(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 656(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 672(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 544(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 552(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 560(%esp), %edi
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 564(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 600(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 536(%esp), %edx
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 472(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 484(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ adcl 488(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 496(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 400(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 400(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 412(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 420(%esp), %edi
+ adcl 424(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 444(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 392(%esp), %edx
+ movl 92(%esp), %ecx # 4-byte Reload
+ addl 328(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 336(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 344(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 352(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 368(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 256(%esp), %ebp
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 260(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 268(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 280(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 284(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 248(%esp), %edx
+ movl %edi, %ecx
+ addl 184(%esp), %ecx
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 188(%esp), %edi
+ adcl 192(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 196(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 208(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 112(%esp), %esi
+ movl %edi, %eax
+ adcl 116(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 120(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ adcl 124(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 128(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 132(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 140(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 144(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 148(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 152(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 156(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 160(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 164(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 168(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 172(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 176(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
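+# Descriptive annotation (not in the LLVM output): final correction of the
+# montNF result. The modulus at 2444(%esp) is subtracted limb by limb with a
+# subl/sbbl chain, and the sign of the last borrow (the js tests below)
+# selects the unreduced or reduced value written to the output at 2432(%esp).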
+ movl %eax, %edx
+ movl 2444(%esp), %esi
+ subl (%esi), %edx
+ sbbl 4(%esi), %edi
+ movl %ebp, %ecx
+ sbbl 8(%esi), %ecx
+ movl %ebx, %eax
+ sbbl 12(%esi), %eax
+ movl 80(%esp), %ebx # 4-byte Reload
+ sbbl 16(%esi), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ sbbl 20(%esi), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ sbbl 24(%esi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %ebx # 4-byte Reload
+ sbbl 28(%esi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 48(%esp), %ebx # 4-byte Reload
+ sbbl 32(%esi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ sbbl 36(%esi), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ sbbl 40(%esi), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %ebx # 4-byte Reload
+ sbbl 44(%esi), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 48(%esi), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 96(%esp), %ebx # 4-byte Reload
+ sbbl 52(%esi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 92(%esp), %ebx # 4-byte Reload
+ sbbl 56(%esi), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 104(%esp), %ebx # 4-byte Reload
+ sbbl 60(%esi), %ebx
+ movl %ebx, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ testl %ebx, %ebx
+ js .LBB243_2
+# BB#1:
+ movl %edx, %esi
+.LBB243_2:
+ movl 2432(%esp), %edx
+ movl %esi, (%edx)
+ movl 108(%esp), %esi # 4-byte Reload
+ js .LBB243_4
+# BB#3:
+ movl %edi, %esi
+.LBB243_4:
+ movl %esi, 4(%edx)
+ js .LBB243_6
+# BB#5:
+ movl %ecx, %ebp
+.LBB243_6:
+ movl %ebp, 8(%edx)
+ movl 76(%esp), %ecx # 4-byte Reload
+ js .LBB243_8
+# BB#7:
+ movl %eax, %ecx
+.LBB243_8:
+ movl %ecx, 12(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB243_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB243_10:
+ movl %eax, 16(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ js .LBB243_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB243_12:
+ movl %eax, 20(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB243_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB243_14:
+ movl %eax, 24(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB243_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB243_16:
+ movl %eax, 28(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB243_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB243_18:
+ movl %eax, 32(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB243_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB243_20:
+ movl %eax, 36(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB243_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB243_22:
+ movl %eax, 40(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB243_24
+# BB#23:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB243_24:
+ movl %eax, 44(%edx)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB243_26
+# BB#25:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB243_26:
+ movl %eax, 48(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB243_28
+# BB#27:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB243_28:
+ movl %eax, 52(%edx)
+ movl 92(%esp), %eax # 4-byte Reload
+ js .LBB243_30
+# BB#29:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB243_30:
+ movl %eax, 56(%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ js .LBB243_32
+# BB#31:
+ movl 84(%esp), %eax # 4-byte Reload
+.LBB243_32:
+ movl %eax, 60(%edx)
+ addl $2412, %esp # imm = 0x96C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end243:
+ .size mcl_fp_montNF16L, .Lfunc_end243-mcl_fp_montNF16L
+
+ .globl mcl_fp_montRed16L
+ .align 16, 0x90
+ .type mcl_fp_montRed16L,@function
+mcl_fp_montRed16L: # @mcl_fp_montRed16L
+# BB#0:
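+# Descriptive annotation (assumption from the code structure, not part of the
+# generated output): mcl_fp_montRed16L appears to perform Montgomery reduction
+# of the double-width value at 1380(%esp) modulo the 16-limb prime at
+# 1384(%esp). Each round multiplies the current low limb by the constant read
+# from -4(p) (spilled to 100(%esp)), adds q*p via .LmulPv512x32 so the low
+# limb cancels, and advances one limb; the surviving high half is the result.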
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1356, %esp # imm = 0x54C
+ calll .L244$pb
+.L244$pb:
+ popl %eax
+.Ltmp55:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp55-.L244$pb), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1384(%esp), %edx
+ movl -4(%edx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1380(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 112(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ imull %eax, %ebx
+ movl 124(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 112(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 108(%ecx), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl %esi, 152(%esp) # 4-byte Spill
+ movl 100(%ecx), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 92(%ecx), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 84(%ecx), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 80(%ecx), %edi
+ movl %edi, 148(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 192(%esp) # 4-byte Spill
+ movl 68(%ecx), %edi
+ movl %edi, 204(%esp) # 4-byte Spill
+ movl 64(%ecx), %esi
+ movl %esi, 200(%esp) # 4-byte Spill
+ movl 60(%ecx), %edi
+ movl %edi, 180(%esp) # 4-byte Spill
+ movl 56(%ecx), %edi
+ movl %edi, 164(%esp) # 4-byte Spill
+ movl 52(%ecx), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 40(%ecx), %ebp
+ movl 36(%ecx), %edi
+ movl 32(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 12(%ecx), %esi
+ movl 8(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 60(%edx), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 56(%edx), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 52(%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 1288(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 1288(%esp), %eax
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1300(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1312(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1320(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1324(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ adcl 1328(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ adcl $0, 204(%esp) # 4-byte Folded Spill
+ adcl $0, 192(%esp) # 4-byte Folded Spill
+ movl 196(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1216(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ movl 112(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 1216(%esp), %esi
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1220(%esp), %edx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1244(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %esi # 4-byte Reload
+ adcl 1260(%esp), %esi
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1264(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ adcl $0, 192(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 196(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ movl 156(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 1144(%esp), %ebp
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1148(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl 1184(%esp), %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1208(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ adcl $0, 196(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ movl 168(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 156(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1072(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 1072(%esp), %esi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1076(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 168(%esp) # 4-byte Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1000(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 1000(%esp), %edi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1004(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ movl 188(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ movl 172(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 928(%esp), %esi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 932(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 188(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 172(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ movl 100(%esp), %ebp # 4-byte Reload
+ imull %ebp, %eax
+ movl %eax, (%esp)
+ leal 856(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 856(%esp), %edi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 860(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ movl 176(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 144(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull %ebp, %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 784(%esp), %esi
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 788(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %ebp # 4-byte Reload
+ adcl 828(%esp), %ebp
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 176(%esp) # 4-byte Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ movl 156(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 712(%esp), %edi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 716(%esp), %ecx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ adcl 752(%esp), %ebp
+ movl %ebp, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %edi # 4-byte Reload
+ adcl 756(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 640(%esp), %esi
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 644(%esp), %ecx
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %esi # 4-byte Reload
+ adcl 668(%esp), %esi
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ adcl 680(%esp), %edi
+ movl %edi, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 1384(%esp), %eax
+ movl %eax, %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 568(%esp), %ebp
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 572(%esp), %ecx
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %ebp # 4-byte Reload
+ adcl 588(%esp), %ebp
+ adcl 592(%esp), %esi
+ movl %esi, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %esi # 4-byte Reload
+ adcl 596(%esp), %esi
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl 632(%esp), %edi
+ movl %edi, 152(%esp) # 4-byte Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 496(%esp), %edi
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %edi # 4-byte Reload
+ adcl 508(%esp), %edi
+ adcl 512(%esp), %ebp
+ movl %ebp, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ adcl 520(%esp), %esi
+ movl %esi, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp # 4-byte Reload
+ adcl 532(%esp), %ebp
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 424(%esp), %esi
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ adcl 432(%esp), %edi
+ movl %edi, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %ecx # 4-byte Reload
+ adcl 436(%esp), %ecx
+ movl %ecx, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %ecx # 4-byte Reload
+ adcl 440(%esp), %ecx
+ movl %ecx, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %ecx # 4-byte Reload
+ adcl 448(%esp), %ecx
+ movl %ecx, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %ecx # 4-byte Reload
+ adcl 452(%esp), %ecx
+ movl %ecx, 196(%esp) # 4-byte Spill
+ adcl 456(%esp), %ebp
+ movl 184(%esp), %ecx # 4-byte Reload
+ adcl 460(%esp), %ecx
+ movl %ecx, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %ecx # 4-byte Reload
+ adcl 464(%esp), %ecx
+ movl %ecx, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 484(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %eax, %esi
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 352(%esp), %esi
+ movl 164(%esp), %esi # 4-byte Reload
+ adcl 356(%esp), %esi
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl 380(%esp), %ebp
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl 416(%esp), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 280(%esp), %esi
+ movl 180(%esp), %ecx # 4-byte Reload
+ adcl 284(%esp), %ecx
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl 304(%esp), %ebp
+ movl %ebp, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %esi # 4-byte Reload
+ adcl 316(%esp), %esi
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 344(%esp), %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ movl 124(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %ebp
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 208(%esp), %ebp
+ movl 200(%esp), %edx # 4-byte Reload
+ adcl 212(%esp), %edx
+ movl %edx, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %ecx # 4-byte Reload
+ adcl 220(%esp), %ecx
+ movl %ecx, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp # 4-byte Reload
+ adcl 228(%esp), %ebp
+ movl %ebp, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ adcl 240(%esp), %esi
+ movl %esi, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl 272(%esp), %edi
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %edx, %eax
+ subl 24(%esp), %edx # 4-byte Folded Reload
+ movl 204(%esp), %esi # 4-byte Reload
+ sbbl 12(%esp), %esi # 4-byte Folded Reload
+ sbbl 16(%esp), %ecx # 4-byte Folded Reload
+ movl 196(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ sbbl 28(%esp), %ebp # 4-byte Folded Reload
+ sbbl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 100(%esp) # 4-byte Spill
+ movl 188(%esp), %ebx # 4-byte Reload
+ sbbl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 104(%esp) # 4-byte Spill
+ movl 168(%esp), %ebx # 4-byte Reload
+ sbbl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 108(%esp) # 4-byte Spill
+ movl 176(%esp), %ebx # 4-byte Reload
+ sbbl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 112(%esp) # 4-byte Spill
+ movl 172(%esp), %ebx # 4-byte Reload
+ sbbl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 116(%esp) # 4-byte Spill
+ movl 152(%esp), %ebx # 4-byte Reload
+ sbbl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %ebx # 4-byte Reload
+ sbbl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 136(%esp) # 4-byte Spill
+ movl 144(%esp), %ebx # 4-byte Reload
+ sbbl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 140(%esp) # 4-byte Spill
+ movl 132(%esp), %ebx # 4-byte Reload
+ sbbl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 160(%esp) # 4-byte Spill
+ movl 128(%esp), %ebx # 4-byte Reload
+ sbbl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 164(%esp) # 4-byte Spill
+ movl 124(%esp), %ebx # 4-byte Reload
+ sbbl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 180(%esp) # 4-byte Spill
+ sbbl $0, %edi
+ andl $1, %edi
+ movl %edi, %ebx
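+# Annotation (inferred): the borrow from the modulus subtraction above has been reduced
+# to a 0/1 flag; each jne below keeps the unsubtracted accumulator word when the flag is
+# set and stores the subtracted word otherwise.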
+ jne .LBB244_2
+# BB#1:
+ movl %edx, 200(%esp) # 4-byte Spill
+.LBB244_2:
+ movl 1376(%esp), %edx
+ movl 200(%esp), %edi # 4-byte Reload
+ movl %edi, (%edx)
+ testb %bl, %bl
+ jne .LBB244_4
+# BB#3:
+ movl %esi, 204(%esp) # 4-byte Spill
+.LBB244_4:
+ movl 204(%esp), %esi # 4-byte Reload
+ movl %esi, 4(%edx)
+ movl 192(%esp), %esi # 4-byte Reload
+ jne .LBB244_6
+# BB#5:
+ movl %ecx, %esi
+.LBB244_6:
+ movl %esi, 8(%edx)
+ movl 196(%esp), %ecx # 4-byte Reload
+ jne .LBB244_8
+# BB#7:
+ movl %eax, %ecx
+.LBB244_8:
+ movl %ecx, 12(%edx)
+ movl 128(%esp), %esi # 4-byte Reload
+ movl 148(%esp), %eax # 4-byte Reload
+ jne .LBB244_10
+# BB#9:
+ movl %ebp, %eax
+.LBB244_10:
+ movl %eax, 16(%edx)
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl 176(%esp), %eax # 4-byte Reload
+ movl 184(%esp), %ebp # 4-byte Reload
+ jne .LBB244_12
+# BB#11:
+ movl 100(%esp), %ebp # 4-byte Reload
+.LBB244_12:
+ movl %ebp, 20(%edx)
+ movl 152(%esp), %ebp # 4-byte Reload
+ movl 188(%esp), %ebx # 4-byte Reload
+ jne .LBB244_14
+# BB#13:
+ movl 104(%esp), %ebx # 4-byte Reload
+.LBB244_14:
+ movl %ebx, 24(%edx)
+ movl 156(%esp), %ebx # 4-byte Reload
+ movl 168(%esp), %edi # 4-byte Reload
+ jne .LBB244_16
+# BB#15:
+ movl 108(%esp), %edi # 4-byte Reload
+.LBB244_16:
+ movl %edi, 28(%edx)
+ movl 144(%esp), %edi # 4-byte Reload
+ jne .LBB244_18
+# BB#17:
+ movl 112(%esp), %eax # 4-byte Reload
+.LBB244_18:
+ movl %eax, 32(%edx)
+ jne .LBB244_20
+# BB#19:
+ movl 116(%esp), %eax # 4-byte Reload
+ movl %eax, 172(%esp) # 4-byte Spill
+.LBB244_20:
+ movl 172(%esp), %eax # 4-byte Reload
+ movl %eax, 36(%edx)
+ jne .LBB244_22
+# BB#21:
+ movl 120(%esp), %ebp # 4-byte Reload
+.LBB244_22:
+ movl %ebp, 40(%edx)
+ movl 132(%esp), %eax # 4-byte Reload
+ jne .LBB244_24
+# BB#23:
+ movl 136(%esp), %ebx # 4-byte Reload
+.LBB244_24:
+ movl %ebx, 44(%edx)
+ jne .LBB244_26
+# BB#25:
+ movl 140(%esp), %edi # 4-byte Reload
+.LBB244_26:
+ movl %edi, 48(%edx)
+ jne .LBB244_28
+# BB#27:
+ movl 160(%esp), %eax # 4-byte Reload
+.LBB244_28:
+ movl %eax, 52(%edx)
+ jne .LBB244_30
+# BB#29:
+ movl 164(%esp), %esi # 4-byte Reload
+.LBB244_30:
+ movl %esi, 56(%edx)
+ jne .LBB244_32
+# BB#31:
+ movl 180(%esp), %ecx # 4-byte Reload
+.LBB244_32:
+ movl %ecx, 60(%edx)
+ addl $1356, %esp # imm = 0x54C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end244:
+ .size mcl_fp_montRed16L, .Lfunc_end244-mcl_fp_montRed16L
+
+ .globl mcl_fp_addPre16L
+ .align 16, 0x90
+ .type mcl_fp_addPre16L,@function
+mcl_fp_addPre16L: # @mcl_fp_addPre16L
+# BB#0:
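+# Annotation (inferred): plain 512-bit addition over 16 32-bit limbs with no modular
+# reduction; the destination is the first pointer argument and the final carry is
+# returned as 0 or 1 in %eax.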
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %ebx
+ adcl 8(%ecx), %ebx
+ movl 16(%esp), %edi
+ movl %edx, (%edi)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%edi)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %ebx, 8(%edi)
+ movl 20(%eax), %ebx
+ movl %edx, 12(%edi)
+ movl 20(%ecx), %edx
+ adcl %ebx, %edx
+ movl 24(%eax), %ebx
+ movl %esi, 16(%edi)
+ movl 24(%ecx), %esi
+ adcl %ebx, %esi
+ movl 28(%eax), %ebx
+ movl %edx, 20(%edi)
+ movl 28(%ecx), %edx
+ adcl %ebx, %edx
+ movl 32(%eax), %ebx
+ movl %esi, 24(%edi)
+ movl 32(%ecx), %esi
+ adcl %ebx, %esi
+ movl 36(%eax), %ebx
+ movl %edx, 28(%edi)
+ movl 36(%ecx), %edx
+ adcl %ebx, %edx
+ movl 40(%eax), %ebx
+ movl %esi, 32(%edi)
+ movl 40(%ecx), %esi
+ adcl %ebx, %esi
+ movl 44(%eax), %ebx
+ movl %edx, 36(%edi)
+ movl 44(%ecx), %edx
+ adcl %ebx, %edx
+ movl 48(%eax), %ebx
+ movl %esi, 40(%edi)
+ movl 48(%ecx), %esi
+ adcl %ebx, %esi
+ movl 52(%eax), %ebx
+ movl %edx, 44(%edi)
+ movl 52(%ecx), %edx
+ adcl %ebx, %edx
+ movl 56(%eax), %ebx
+ movl %esi, 48(%edi)
+ movl 56(%ecx), %esi
+ adcl %ebx, %esi
+ movl %edx, 52(%edi)
+ movl %esi, 56(%edi)
+ movl 60(%eax), %eax
+ movl 60(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 60(%edi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end245:
+ .size mcl_fp_addPre16L, .Lfunc_end245-mcl_fp_addPre16L
+
+ .globl mcl_fp_subPre16L
+ .align 16, 0x90
+ .type mcl_fp_subPre16L,@function
+mcl_fp_subPre16L: # @mcl_fp_subPre16L
+# BB#0:
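+# Annotation (inferred): plain 512-bit subtraction over 16 32-bit limbs (third pointer
+# argument subtracted from the second, result stored through the first) with no modular
+# correction; the final borrow is returned as 0 or 1 in %eax.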
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebp
+ sbbl 8(%edx), %ebp
+ movl 20(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebx)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebp, 8(%ebx)
+ movl 20(%edx), %ebp
+ movl %esi, 12(%ebx)
+ movl 20(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 24(%edx), %ebp
+ movl %edi, 16(%ebx)
+ movl 24(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 28(%edx), %ebp
+ movl %esi, 20(%ebx)
+ movl 28(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 32(%edx), %ebp
+ movl %edi, 24(%ebx)
+ movl 32(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 36(%edx), %ebp
+ movl %esi, 28(%ebx)
+ movl 36(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 40(%edx), %ebp
+ movl %edi, 32(%ebx)
+ movl 40(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 44(%edx), %ebp
+ movl %esi, 36(%ebx)
+ movl 44(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 48(%edx), %ebp
+ movl %edi, 40(%ebx)
+ movl 48(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 52(%edx), %ebp
+ movl %esi, 44(%ebx)
+ movl 52(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 56(%edx), %ebp
+ movl %edi, 48(%ebx)
+ movl 56(%ecx), %edi
+ sbbl %ebp, %edi
+ movl %esi, 52(%ebx)
+ movl %edi, 56(%ebx)
+ movl 60(%edx), %edx
+ movl 60(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 60(%ebx)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end246:
+ .size mcl_fp_subPre16L, .Lfunc_end246-mcl_fp_subPre16L
+
+ .globl mcl_fp_shr1_16L
+ .align 16, 0x90
+ .type mcl_fp_shr1_16L,@function
+mcl_fp_shr1_16L: # @mcl_fp_shr1_16L
+# BB#0:
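+# Annotation (inferred): shifts a 16-limb (512-bit) value right by one bit; the shrdl
+# cascade pulls the low bit of each higher limb into the top of the limb below it, and
+# the topmost limb gets a plain shrl.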
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 48(%ecx)
+ movl 56(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 52(%ecx)
+ movl 60(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 56(%ecx)
+ shrl %eax
+ movl %eax, 60(%ecx)
+ popl %esi
+ retl
+.Lfunc_end247:
+ .size mcl_fp_shr1_16L, .Lfunc_end247-mcl_fp_shr1_16L
+
+ .globl mcl_fp_add16L
+ .align 16, 0x90
+ .type mcl_fp_add16L,@function
+mcl_fp_add16L: # @mcl_fp_add16L
+# BB#0:
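+# Annotation (inferred): modular addition. The raw 16-limb sum is written to the output
+# first, the modulus (fourth pointer argument) is then subtracted from it, and if the
+# combined carry/borrow check shows the sum is not below the modulus, the output is
+# overwritten with the reduced value in the nocarry branch.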
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 84(%esp), %edx
+ movl (%edx), %esi
+ movl 4(%edx), %ebp
+ movl 80(%esp), %ecx
+ addl (%ecx), %esi
+ movl %esi, %ebx
+ adcl 4(%ecx), %ebp
+ movl 8(%edx), %eax
+ adcl 8(%ecx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 12(%ecx), %esi
+ movl 16(%ecx), %edi
+ adcl 12(%edx), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 16(%edx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ adcl 20(%edx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ adcl 24(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ adcl 28(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ adcl 32(%edx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ adcl 36(%edx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ adcl 40(%edx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ adcl 44(%edx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ adcl 48(%edx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%ecx), %eax
+ adcl 52(%edx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 56(%ecx), %esi
+ adcl 56(%edx), %esi
+ movl 60(%ecx), %ecx
+ adcl 60(%edx), %ecx
+ movl 76(%esp), %edx
+ movl %ebx, (%edx)
+ movl %ebx, %eax
+ movl %ebp, 4(%edx)
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%edx)
+ movl 48(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%edx)
+ movl %edi, 16(%edx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%edx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%edx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%edx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%edx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%edx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%edx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%edx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 48(%edx)
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, 52(%edx)
+ movl %esi, 56(%edx)
+ movl %ecx, 60(%edx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 88(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 8(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ sbbl 12(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ sbbl 16(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 20(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ sbbl 24(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ sbbl 44(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 48(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ sbbl 52(%edi), %eax
+ movl %eax, %ebp
+ sbbl 56(%edi), %esi
+ sbbl 60(%edi), %ecx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB248_2
+# BB#1: # %nocarry
+ movl 4(%esp), %edi # 4-byte Reload
+ movl %edi, (%edx)
+ movl (%esp), %edi # 4-byte Reload
+ movl %edi, 4(%edx)
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%edx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%edx)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%edx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%edx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%edx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%edx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%edx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%edx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%edx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%edx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 48(%edx)
+ movl %ebp, 52(%edx)
+ movl %esi, 56(%edx)
+ movl %ecx, 60(%edx)
+.LBB248_2: # %carry
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end248:
+ .size mcl_fp_add16L, .Lfunc_end248-mcl_fp_add16L
+
+ .globl mcl_fp_addNF16L
+ .align 16, 0x90
+ .type mcl_fp_addNF16L,@function
+mcl_fp_addNF16L: # @mcl_fp_addNF16L
+# BB#0:
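+# Annotation (inferred): the NF addition variant: both the raw sum and (sum - modulus)
+# are computed, and the sign of the top word of the difference, tested with js below,
+# selects which set of limbs is written to the output.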
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $124, %esp
+ movl 152(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %ecx
+ movl 148(%esp), %esi
+ addl (%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 4(%esi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%edx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 56(%edx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 48(%edx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 44(%edx), %edi
+ movl 40(%edx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 36(%edx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl 20(%edx), %ebp
+ movl 16(%edx), %ebx
+ movl 12(%edx), %ecx
+ movl 8(%edx), %edx
+ adcl 8(%esi), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 12(%esi), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 16(%esi), %ebx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl 24(%esi), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 28(%esi), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 32(%esi), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 36(%esi), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esi), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 44(%esi), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esi), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 52(%esi), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 56(%esi), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 60(%esi), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 156(%esp), %edi
+ movl 80(%esp), %esi # 4-byte Reload
+ subl (%edi), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ sbbl 4(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 8(%edi), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ sbbl 12(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ sbbl 20(%edi), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ sbbl 24(%edi), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 44(%edi), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 52(%edi), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ movl %ecx, %ebx
+ sbbl 56(%edi), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 112(%esp), %ebx # 4-byte Reload
+ sbbl 60(%edi), %ebx
+ movl 80(%esp), %edi # 4-byte Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ testl %ebx, %ebx
+ js .LBB249_2
+# BB#1:
+ movl %esi, %edi
+.LBB249_2:
+ movl 144(%esp), %ebx
+ movl %edi, (%ebx)
+ movl 84(%esp), %edx # 4-byte Reload
+ js .LBB249_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+.LBB249_4:
+ movl %edx, 4(%ebx)
+ movl 68(%esp), %edx # 4-byte Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB249_6
+# BB#5:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB249_6:
+ movl %eax, 8(%ebx)
+ movl 100(%esp), %eax # 4-byte Reload
+ movl 88(%esp), %ecx # 4-byte Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ js .LBB249_8
+# BB#7:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB249_8:
+ movl %esi, 12(%ebx)
+ movl 108(%esp), %esi # 4-byte Reload
+ js .LBB249_10
+# BB#9:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB249_10:
+ movl %edx, 16(%ebx)
+ movl 112(%esp), %edi # 4-byte Reload
+ movl 104(%esp), %ebp # 4-byte Reload
+ js .LBB249_12
+# BB#11:
+ movl 16(%esp), %edx # 4-byte Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+.LBB249_12:
+ movl 72(%esp), %edx # 4-byte Reload
+ movl %edx, 20(%ebx)
+ js .LBB249_14
+# BB#13:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB249_14:
+ movl %ecx, 24(%ebx)
+ js .LBB249_16
+# BB#15:
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 116(%esp) # 4-byte Spill
+.LBB249_16:
+ movl 116(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%ebx)
+ js .LBB249_18
+# BB#17:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB249_18:
+ movl %eax, 32(%ebx)
+ movl 96(%esp), %ecx # 4-byte Reload
+ js .LBB249_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 120(%esp) # 4-byte Spill
+.LBB249_20:
+ movl 120(%esp), %eax # 4-byte Reload
+ movl %eax, 36(%ebx)
+ js .LBB249_22
+# BB#21:
+ movl 36(%esp), %ebp # 4-byte Reload
+.LBB249_22:
+ movl %ebp, 40(%ebx)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB249_24
+# BB#23:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB249_24:
+ movl %eax, 44(%ebx)
+ movl 92(%esp), %eax # 4-byte Reload
+ js .LBB249_26
+# BB#25:
+ movl 44(%esp), %esi # 4-byte Reload
+.LBB249_26:
+ movl %esi, 48(%ebx)
+ js .LBB249_28
+# BB#27:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB249_28:
+ movl %eax, 52(%ebx)
+ js .LBB249_30
+# BB#29:
+ movl 52(%esp), %ecx # 4-byte Reload
+.LBB249_30:
+ movl %ecx, 56(%ebx)
+ js .LBB249_32
+# BB#31:
+ movl 56(%esp), %edi # 4-byte Reload
+.LBB249_32:
+ movl %edi, 60(%ebx)
+ addl $124, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end249:
+ .size mcl_fp_addNF16L, .Lfunc_end249-mcl_fp_addNF16L
+
+ .globl mcl_fp_sub16L
+ .align 16, 0x90
+ .type mcl_fp_sub16L,@function
+mcl_fp_sub16L: # @mcl_fp_sub16L
+# BB#0:
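+# Annotation (inferred): modular subtraction. The raw 16-limb difference is written to
+# the output; if the subtraction borrowed, the carry branch adds the modulus (fourth
+# pointer argument) back in.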
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 88(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ sbbl 32(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ sbbl 40(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 44(%esi), %edx
+ sbbl 44(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 48(%esi), %ecx
+ sbbl 48(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 52(%esi), %eax
+ sbbl 52(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 56(%esi), %ebp
+ sbbl 56(%edi), %ebp
+ movl 60(%esi), %esi
+ sbbl 60(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 80(%esp), %ebx
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 56(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%ebx)
+ movl %edx, 44(%ebx)
+ movl %ecx, 48(%ebx)
+ movl %eax, 52(%ebx)
+ movl %ebp, 56(%ebx)
+ movl %esi, 60(%ebx)
+ je .LBB250_2
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 92(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 8(%esi), %edi
+ movl 12(%esi), %eax
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl 44(%esi), %eax
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%ebx)
+ movl 48(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%ebx)
+ movl 52(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 48(%ebx)
+ movl %eax, 52(%ebx)
+ movl 56(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 56(%ebx)
+ movl 60(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%ebx)
+.LBB250_2: # %nocarry
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end250:
+ .size mcl_fp_sub16L, .Lfunc_end250-mcl_fp_sub16L
+
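+# mcl_fp_subNF16L: branchless 16-limb subtraction. The borrow out of the top
+# limb is sign-extended into a mask; the modulus (fourth argument) is ANDed with
+# the mask and added back, so no conditional branch is needed.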
+ .globl mcl_fp_subNF16L
+ .align 16, 0x90
+ .type mcl_fp_subNF16L,@function
+mcl_fp_subNF16L: # @mcl_fp_subNF16L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $104, %esp
+ movl 128(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 132(%esp), %edi
+ subl (%edi), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ sbbl 4(%edi), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 60(%ecx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 56(%ecx), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 52(%ecx), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 36(%ecx), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ sbbl 36(%edi), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 52(%edi), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 56(%edi), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ sbbl 60(%edi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ sarl $31, %eax
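+# Sign-extend the top limb of the difference: %eax becomes an all-ones mask if
+# the subtraction borrowed and zero otherwise. Each modulus limb below is ANDed
+# with this mask before being added back.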
+ movl 136(%esp), %esi
+ movl 60(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 48(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 40(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 36(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 32(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 28(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 24(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 20(%esi), %ebp
+ andl %eax, %ebp
+ movl 16(%esi), %ebx
+ andl %eax, %ebx
+ movl 12(%esi), %edi
+ andl %eax, %edi
+ movl 8(%esi), %edx
+ andl %eax, %edx
+ movl 4(%esi), %ecx
+ andl %eax, %ecx
+ andl (%esi), %eax
+ addl 64(%esp), %eax # 4-byte Folded Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl 124(%esp), %esi
+ movl %eax, (%esi)
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 4(%esi)
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edx, 8(%esi)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 12(%esi)
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %ebx, 16(%esi)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 20(%esi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 24(%esi)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 28(%esi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%esi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%esi)
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%esi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 44(%esi)
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 92(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl %eax, 56(%esi)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esi)
+ addl $104, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end251:
+ .size mcl_fp_subNF16L, .Lfunc_end251-mcl_fp_subNF16L
+
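+# mcl_fpDbl_add16L: addition of two 32-limb (double-width) values. The low 16
+# limbs of the sum are stored directly; the high 16 limbs are conditionally
+# reduced by a trial subtraction of the modulus (fourth argument).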
+ .globl mcl_fpDbl_add16L
+ .align 16, 0x90
+ .type mcl_fpDbl_add16L,@function
+mcl_fpDbl_add16L: # @mcl_fpDbl_add16L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $116, %esp
+ movl 144(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 140(%esp), %ebx
+ addl (%ebx), %esi
+ adcl 4(%ebx), %edx
+ movl 8(%ecx), %edi
+ adcl 8(%ebx), %edi
+ movl 12(%ebx), %ebp
+ movl 136(%esp), %eax
+ movl %esi, (%eax)
+ movl 16(%ebx), %esi
+ adcl 12(%ecx), %ebp
+ adcl 16(%ecx), %esi
+ movl %edx, 4(%eax)
+ movl 72(%ecx), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl %edi, 8(%eax)
+ movl 20(%ecx), %edx
+ movl %ebp, 12(%eax)
+ movl 20(%ebx), %edi
+ adcl %edx, %edi
+ movl 24(%ecx), %edx
+ movl %esi, 16(%eax)
+ movl 24(%ebx), %esi
+ adcl %edx, %esi
+ movl 28(%ecx), %edx
+ movl %edi, 20(%eax)
+ movl 28(%ebx), %edi
+ adcl %edx, %edi
+ movl 32(%ecx), %edx
+ movl %esi, 24(%eax)
+ movl 32(%ebx), %esi
+ adcl %edx, %esi
+ movl 36(%ecx), %edx
+ movl %edi, 28(%eax)
+ movl 36(%ebx), %edi
+ adcl %edx, %edi
+ movl 40(%ecx), %edx
+ movl %esi, 32(%eax)
+ movl 40(%ebx), %esi
+ adcl %edx, %esi
+ movl 44(%ecx), %edx
+ movl %edi, 36(%eax)
+ movl 44(%ebx), %edi
+ adcl %edx, %edi
+ movl 48(%ecx), %edx
+ movl %esi, 40(%eax)
+ movl 48(%ebx), %esi
+ adcl %edx, %esi
+ movl 52(%ecx), %edx
+ movl %edi, 44(%eax)
+ movl 52(%ebx), %edi
+ adcl %edx, %edi
+ movl 56(%ecx), %edx
+ movl %esi, 48(%eax)
+ movl 56(%ebx), %esi
+ adcl %edx, %esi
+ movl 60(%ecx), %edx
+ movl %edi, 52(%eax)
+ movl 60(%ebx), %ebp
+ adcl %edx, %ebp
+ movl 64(%ecx), %edx
+ movl %esi, 56(%eax)
+ movl 64(%ebx), %esi
+ adcl %edx, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%ecx), %edx
+ movl %ebp, 60(%eax)
+ movl 68(%ebx), %eax
+ adcl %edx, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 72(%ebx), %eax
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%ecx), %ebp
+ movl 76(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%ecx), %ebp
+ movl 80(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 84(%ecx), %ebp
+ movl 84(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 88(%ecx), %ebp
+ movl 88(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%ecx), %ebp
+ movl 92(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%ecx), %ebp
+ movl 96(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 100(%ecx), %ebp
+ movl 100(%ebx), %edx
+ adcl %ebp, %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 104(%ecx), %ebp
+ movl 104(%ebx), %edx
+ adcl %ebp, %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 108(%ecx), %ebp
+ movl 108(%ebx), %edx
+ adcl %ebp, %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 112(%ecx), %edx
+ movl 112(%ebx), %ebp
+ adcl %edx, %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 116(%ecx), %edx
+ movl 116(%ebx), %esi
+ adcl %edx, %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 120(%ecx), %edx
+ movl 120(%ebx), %edi
+ adcl %edx, %edi
+ movl 124(%ecx), %ecx
+ movl 124(%ebx), %esi
+ adcl %ecx, %esi
+ sbbl %ecx, %ecx
+ andl $1, %ecx
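+# %ecx holds the carry out of the high-half addition. The modulus at 148(%esp)
+# is trial-subtracted from the high 16 limbs; the branches below keep either the
+# reduced or the unreduced limbs.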
+ movl 148(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ subl (%edx), %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl 76(%esp), %ebx # 4-byte Reload
+ sbbl 4(%edx), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ sbbl 8(%edx), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx # 4-byte Reload
+ sbbl 12(%edx), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 104(%esp), %ebx # 4-byte Reload
+ sbbl 16(%edx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 20(%edx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 108(%esp), %ebx # 4-byte Reload
+ sbbl 24(%edx), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 92(%esp), %ebx # 4-byte Reload
+ sbbl 28(%edx), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ sbbl 32(%edx), %ebx
+ movl 112(%esp), %eax # 4-byte Reload
+ sbbl 36(%edx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ sbbl 40(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 44(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl 48(%edx), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ movl %eax, %ebp
+ sbbl 52(%edx), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ sbbl 56(%edx), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ sbbl 60(%edx), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB252_2
+# BB#1:
+ movl %ebx, 64(%esp) # 4-byte Spill
+.LBB252_2:
+ testb %cl, %cl
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB252_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB252_4:
+ movl 136(%esp), %ebx
+ movl %ecx, 64(%ebx)
+ movl %esi, %ebp
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ movl 92(%esp), %ecx # 4-byte Reload
+ movl 88(%esp), %edx # 4-byte Reload
+ movl 76(%esp), %esi # 4-byte Reload
+ jne .LBB252_6
+# BB#5:
+ movl 4(%esp), %esi # 4-byte Reload
+.LBB252_6:
+ movl %esi, 68(%ebx)
+ movl 84(%esp), %esi # 4-byte Reload
+ movl 80(%esp), %eax # 4-byte Reload
+ jne .LBB252_8
+# BB#7:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB252_8:
+ movl %eax, 72(%ebx)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB252_10
+# BB#9:
+ movl 12(%esp), %esi # 4-byte Reload
+.LBB252_10:
+ movl %esi, 76(%ebx)
+ jne .LBB252_12
+# BB#11:
+ movl 16(%esp), %esi # 4-byte Reload
+ movl %esi, 104(%esp) # 4-byte Spill
+.LBB252_12:
+ movl 104(%esp), %esi # 4-byte Reload
+ movl %esi, 80(%ebx)
+ jne .LBB252_14
+# BB#13:
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB252_14:
+ movl %edx, 84(%ebx)
+ jne .LBB252_16
+# BB#15:
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 108(%esp) # 4-byte Spill
+.LBB252_16:
+ movl 108(%esp), %edx # 4-byte Reload
+ movl %edx, 88(%ebx)
+ jne .LBB252_18
+# BB#17:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB252_18:
+ movl %ecx, 92(%ebx)
+ movl 64(%esp), %ecx # 4-byte Reload
+ movl %ecx, 96(%ebx)
+ jne .LBB252_20
+# BB#19:
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 112(%esp) # 4-byte Spill
+.LBB252_20:
+ movl 112(%esp), %ecx # 4-byte Reload
+ movl %ecx, 100(%ebx)
+ jne .LBB252_22
+# BB#21:
+ movl 36(%esp), %edi # 4-byte Reload
+.LBB252_22:
+ movl %edi, 104(%ebx)
+ movl 100(%esp), %ecx # 4-byte Reload
+ jne .LBB252_24
+# BB#23:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB252_24:
+ movl %ecx, 108(%ebx)
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB252_26
+# BB#25:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB252_26:
+ movl %eax, 112(%ebx)
+ movl 68(%esp), %eax # 4-byte Reload
+ jne .LBB252_28
+# BB#27:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB252_28:
+ movl %eax, 116(%ebx)
+ jne .LBB252_30
+# BB#29:
+ movl 52(%esp), %ecx # 4-byte Reload
+.LBB252_30:
+ movl %ecx, 120(%ebx)
+ jne .LBB252_32
+# BB#31:
+ movl 56(%esp), %ebp # 4-byte Reload
+.LBB252_32:
+ movl %ebp, 124(%ebx)
+ addl $116, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end252:
+ .size mcl_fpDbl_add16L, .Lfunc_end252-mcl_fpDbl_add16L
+
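+# mcl_fpDbl_sub16L: subtraction of two 32-limb (double-width) values. The low 16
+# limbs are stored directly; if the subtraction borrowed, the modulus (fourth
+# argument) is added to the high 16 limbs (each branch loads a modulus limb or 0).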
+ .globl mcl_fpDbl_sub16L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub16L,@function
+mcl_fpDbl_sub16L: # @mcl_fpDbl_sub16L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+ movl 132(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edi
+ movl 136(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%eax), %ebx
+ sbbl 8(%edx), %ebx
+ movl 128(%esp), %ecx
+ movl %esi, (%ecx)
+ movl 12(%eax), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ecx)
+ movl 16(%eax), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ecx)
+ movl 24(%eax), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ecx)
+ movl 32(%eax), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ sbbl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %edi, 32(%ecx)
+ movl 40(%eax), %edi
+ sbbl %ebx, %edi
+ movl 44(%edx), %ebx
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ sbbl %ebx, %esi
+ movl 48(%edx), %ebx
+ movl %edi, 40(%ecx)
+ movl 48(%eax), %edi
+ sbbl %ebx, %edi
+ movl 52(%edx), %ebx
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %esi
+ sbbl %ebx, %esi
+ movl 56(%edx), %ebx
+ movl %edi, 48(%ecx)
+ movl 56(%eax), %edi
+ sbbl %ebx, %edi
+ movl 60(%edx), %ebx
+ movl %esi, 52(%ecx)
+ movl 60(%eax), %esi
+ sbbl %ebx, %esi
+ movl 64(%edx), %ebx
+ movl %edi, 56(%ecx)
+ movl 64(%eax), %edi
+ sbbl %ebx, %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 68(%edx), %edi
+ movl %esi, 60(%ecx)
+ movl 68(%eax), %esi
+ sbbl %edi, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 72(%edx), %esi
+ movl 72(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 76(%edx), %esi
+ movl 76(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 80(%edx), %esi
+ movl 80(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 84(%edx), %esi
+ movl 84(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 88(%edx), %esi
+ movl 88(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 92(%edx), %esi
+ movl 92(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 96(%edx), %esi
+ movl 96(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 100(%edx), %esi
+ movl 100(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 104(%edx), %esi
+ movl 104(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 108(%edx), %esi
+ movl 108(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 112(%edx), %esi
+ movl 112(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 116(%edx), %esi
+ movl 116(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 120(%edx), %esi
+ movl 120(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 124(%edx), %edx
+ movl 124(%eax), %eax
+ sbbl %edx, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 140(%esp), %ebx
+ jne .LBB253_1
+# BB#2:
+ movl $0, 68(%esp) # 4-byte Folded Spill
+ jmp .LBB253_3
+.LBB253_1:
+ movl 60(%ebx), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+.LBB253_3:
+ testb %al, %al
+ jne .LBB253_4
+# BB#5:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ movl $0, %ebp
+ jmp .LBB253_6
+.LBB253_4:
+ movl (%ebx), %ebp
+ movl 4(%ebx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB253_6:
+ jne .LBB253_7
+# BB#8:
+ movl $0, 36(%esp) # 4-byte Folded Spill
+ jmp .LBB253_9
+.LBB253_7:
+ movl 56(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+.LBB253_9:
+ jne .LBB253_10
+# BB#11:
+ movl $0, 32(%esp) # 4-byte Folded Spill
+ jmp .LBB253_12
+.LBB253_10:
+ movl 52(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+.LBB253_12:
+ jne .LBB253_13
+# BB#14:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ jmp .LBB253_15
+.LBB253_13:
+ movl 48(%ebx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+.LBB253_15:
+ jne .LBB253_16
+# BB#17:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB253_18
+.LBB253_16:
+ movl 44(%ebx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB253_18:
+ jne .LBB253_19
+# BB#20:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB253_21
+.LBB253_19:
+ movl 40(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB253_21:
+ jne .LBB253_22
+# BB#23:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB253_24
+.LBB253_22:
+ movl 36(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB253_24:
+ jne .LBB253_25
+# BB#26:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB253_27
+.LBB253_25:
+ movl 32(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB253_27:
+ jne .LBB253_28
+# BB#29:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB253_30
+.LBB253_28:
+ movl 28(%ebx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB253_30:
+ jne .LBB253_31
+# BB#32:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB253_33
+.LBB253_31:
+ movl 24(%ebx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB253_33:
+ jne .LBB253_34
+# BB#35:
+ movl $0, %esi
+ jmp .LBB253_36
+.LBB253_34:
+ movl 20(%ebx), %esi
+.LBB253_36:
+ jne .LBB253_37
+# BB#38:
+ movl $0, %edx
+ jmp .LBB253_39
+.LBB253_37:
+ movl 16(%ebx), %edx
+.LBB253_39:
+ jne .LBB253_40
+# BB#41:
+ movl $0, %edi
+ jmp .LBB253_42
+.LBB253_40:
+ movl 12(%ebx), %edi
+.LBB253_42:
+ jne .LBB253_43
+# BB#44:
+ xorl %ebx, %ebx
+ jmp .LBB253_45
+.LBB253_43:
+ movl 8(%ebx), %ebx
+.LBB253_45:
+ addl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %eax
+ movl 24(%esp), %ebp # 4-byte Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 68(%ecx)
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 72(%ecx)
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 76(%ecx)
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 80(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 84(%ecx)
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 88(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 96(%ecx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 100(%ecx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 104(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 108(%ecx)
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 112(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 116(%ecx)
+ movl %eax, 120(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 124(%ecx)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end253:
+ .size mcl_fpDbl_sub16L, .Lfunc_end253-mcl_fpDbl_sub16L
+
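+# .LmulPv544x32: local helper multiplying a 544-bit (17 x 32-bit limb) operand by
+# a 32-bit word. On entry %ecx points to the 18-limb result buffer, %edx to the
+# operand, and the multiplier is the first stack argument; the result pointer is
+# returned in %eax.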
+ .align 16, 0x90
+ .type .LmulPv544x32,@function
+.LmulPv544x32: # @mulPv544x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $120, %esp
+ movl %edx, %ebp
+ movl 140(%esp), %ebx
+ movl %ebx, %eax
+ mull 64(%ebp)
+ movl %edx, 116(%esp) # 4-byte Spill
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 60(%ebp)
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 56(%ebp)
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 52(%ebp)
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 48(%ebp)
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 44(%ebp)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 40(%ebp)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 36(%ebp)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 32(%ebp)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 28(%ebp)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 24(%ebp)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 20(%ebp)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 16(%ebp)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 12(%ebp)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 8(%ebp)
+ movl %edx, %edi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 4(%ebp)
+ movl %edx, %esi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull (%ebp)
+ movl %eax, (%ecx)
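+# Accumulate the partial products: each output limb is the high word of the
+# previous 32x32 product plus the low word of the next, with adcl propagating
+# the carry through all 18 limbs.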
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 8(%ecx)
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%ecx)
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%ecx)
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 68(%ecx)
+ movl %ecx, %eax
+ addl $120, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end254:
+ .size .LmulPv544x32, .Lfunc_end254-.LmulPv544x32
+
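+# mcl_fp_mulUnitPre17L: multiplies a 17-limb operand by a 32-bit word via
+# .LmulPv544x32 into a stack buffer, then copies the 18-limb product to the
+# destination (first stack argument).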
+ .globl mcl_fp_mulUnitPre17L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre17L,@function
+mcl_fp_mulUnitPre17L: # @mcl_fp_mulUnitPre17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $140, %esp
+ calll .L255$pb
+.L255$pb:
+ popl %ebx
+.Ltmp56:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp56-.L255$pb), %ebx
+ movl 168(%esp), %eax
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 164(%esp), %edx
+ calll .LmulPv544x32
+ movl 132(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 108(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 104(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp
+ movl 80(%esp), %ebx
+ movl 76(%esp), %edi
+ movl 72(%esp), %esi
+ movl 64(%esp), %edx
+ movl 68(%esp), %ecx
+ movl 160(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, 64(%eax)
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl %ecx, 68(%eax)
+ addl $140, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end255:
+ .size mcl_fp_mulUnitPre17L, .Lfunc_end255-mcl_fp_mulUnitPre17L
+
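+# mcl_fpDbl_mulPre17L: schoolbook 17 x 17 limb multiplication. For each 32-bit
+# limb of the second operand it calls .LmulPv544x32 on the first operand, writes
+# the lowest limb of the running sum to the output, and folds the remaining limbs
+# into the accumulator with adcl.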
+ .globl mcl_fpDbl_mulPre17L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre17L,@function
+mcl_fpDbl_mulPre17L: # @mcl_fpDbl_mulPre17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1356, %esp # imm = 0x54C
+ calll .L256$pb
+.L256$pb:
+ popl %edi
+.Ltmp57:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp57-.L256$pb), %edi
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 1384(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1280(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl %edx, %esi
+ movl %edi, %ebx
+ calll .LmulPv544x32
+ movl 1348(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1344(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1340(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1332(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1328(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1320(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1316(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1312(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1308(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1304(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1300(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1296(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 1292(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 1288(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 1280(%esp), %eax
+ movl 1284(%esp), %ebp
+ movl 1376(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 1384(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl %esi, %edx
+ movl %edi, %ebx
+ calll .LmulPv544x32
+ addl 1208(%esp), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 1276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1272(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1268(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1260(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1256(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 1252(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1248(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1244(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1236(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1232(%esp), %edi
+ movl 1228(%esp), %esi
+ movl 1224(%esp), %edx
+ movl 1220(%esp), %ecx
+ movl 1212(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1216(%esp), %eax
+ movl 1376(%esp), %ebp
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ movl 12(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 120(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 64(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1136(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 1136(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1204(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1200(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1196(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1192(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1188(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1180(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1176(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1168(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1164(%esp), %ebx
+ movl 1160(%esp), %edi
+ movl 1156(%esp), %esi
+ movl 1152(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1148(%esp), %edx
+ movl 1140(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1144(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1064(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1064(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 1132(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1128(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1124(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1120(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1116(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1108(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1100(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1096(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1092(%esp), %ebx
+ movl 1088(%esp), %edi
+ movl 1084(%esp), %esi
+ movl 1080(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1076(%esp), %edx
+ movl 1068(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1072(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 992(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1060(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1056(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1052(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1044(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1040(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1036(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1032(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1028(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1024(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1020(%esp), %ebx
+ movl 1016(%esp), %edi
+ movl 1012(%esp), %esi
+ movl 1008(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1004(%esp), %edx
+ movl 996(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1000(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 920(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 920(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 988(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 984(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 980(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 976(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 972(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 968(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 964(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 956(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 952(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 948(%esp), %ebx
+ movl 944(%esp), %edi
+ movl 940(%esp), %esi
+ movl 936(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 932(%esp), %edx
+ movl 924(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 928(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 848(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 848(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 916(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 912(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 908(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 904(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 900(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 896(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 892(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 888(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 884(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 880(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 876(%esp), %ebx
+ movl 872(%esp), %edi
+ movl 868(%esp), %esi
+ movl 864(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 860(%esp), %edx
+ movl 852(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 856(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 776(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 776(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 844(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 840(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 836(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 828(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 804(%esp), %ebx
+ movl 800(%esp), %edi
+ movl 796(%esp), %esi
+ movl 792(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 788(%esp), %edx
+ movl 780(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 704(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 772(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 764(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 760(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 748(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 744(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 740(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 736(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 732(%esp), %ebx
+ movl 728(%esp), %edi
+ movl 724(%esp), %esi
+ movl 720(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 716(%esp), %edx
+ movl 708(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 712(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 632(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 700(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 696(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 692(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 688(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 684(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 680(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 676(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 672(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 660(%esp), %ebx
+ movl 656(%esp), %edi
+ movl 652(%esp), %esi
+ movl 648(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 644(%esp), %edx
+ movl 636(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 640(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 560(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 628(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 624(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 620(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 616(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 588(%esp), %ebx
+ movl 584(%esp), %edi
+ movl 580(%esp), %esi
+ movl 576(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 572(%esp), %edx
+ movl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 568(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 488(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 532(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 524(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 520(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 516(%esp), %ebx
+ movl 512(%esp), %edi
+ movl 508(%esp), %esi
+ movl 504(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 500(%esp), %edx
+ movl 492(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 496(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 416(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 472(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 468(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 464(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 460(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 444(%esp), %ebx
+ movl 440(%esp), %edi
+ movl 436(%esp), %esi
+ movl 432(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 428(%esp), %edx
+ movl 420(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 424(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 344(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 388(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 372(%esp), %ebx
+ movl 368(%esp), %edi
+ movl 364(%esp), %esi
+ movl 360(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 356(%esp), %edx
+ movl 348(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 352(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 52(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 1380(%esp), %eax
+ movl %eax, %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 272(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 320(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 316(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 300(%esp), %ebx
+ movl 296(%esp), %edi
+ movl 292(%esp), %edx
+ movl 288(%esp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 284(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl 280(%esp), %ecx
+ movl 120(%esp), %esi # 4-byte Reload
+ movl 1376(%esp), %ebp
+ movl %esi, 56(%ebp)
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebp
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %ecx
+ movl %ecx, %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 200(%esp), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 248(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 244(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 240(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 236(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 232(%esp), %edi
+ movl 228(%esp), %esi
+ movl 224(%esp), %edx
+ movl 220(%esp), %ecx
+ movl 216(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl 204(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 208(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 1376(%esp), %ebx
+ movl %ebp, 60(%ebx)
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 64(%eax), %eax
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 128(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 196(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 192(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 188(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 184(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 164(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 156(%esp), %ebx
+ movl 152(%esp), %edi
+ movl 148(%esp), %esi
+ movl 144(%esp), %edx
+ movl 140(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 64(%eax)
+ movl 64(%esp), %ebp # 4-byte Reload
+ movl %ebp, 68(%eax)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ movl %ebp, 72(%eax)
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 76(%eax)
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 80(%eax)
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 84(%eax)
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 88(%eax)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 92(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 96(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 100(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 104(%eax)
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 108(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 116(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 112(%eax)
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 116(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 120(%eax)
+ movl %ecx, 124(%eax)
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 128(%eax)
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 132(%eax)
+ addl $1356, %esp # imm = 0x54C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end256:
+ .size mcl_fpDbl_mulPre17L, .Lfunc_end256-mcl_fpDbl_mulPre17L
+
+ .globl mcl_fpDbl_sqrPre17L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre17L,@function
+mcl_fpDbl_sqrPre17L: # @mcl_fpDbl_sqrPre17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1356, %esp # imm = 0x54C
+ calll .L257$pb
+.L257$pb:
+ popl %ebx
+.Ltmp58:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp58-.L257$pb), %ebx
+ movl %ebx, 124(%esp) # 4-byte Spill
+ movl 1380(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 1280(%esp), %ecx
+ movl %edx, %edi
+ movl %ebx, %esi
+ calll .LmulPv544x32
+ movl 1348(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1344(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1340(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1332(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1328(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1320(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1316(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1312(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1308(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1304(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1300(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1296(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 1292(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 1288(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 1280(%esp), %eax
+ movl 1284(%esp), %ebp
+ movl 1376(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl %esi, %ebx
+ calll .LmulPv544x32
+ addl 1208(%esp), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 1276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1272(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1268(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1260(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1256(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 1252(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1248(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1244(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1236(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1232(%esp), %edi
+ movl 1228(%esp), %esi
+ movl 1224(%esp), %edx
+ movl 1220(%esp), %ecx
+ movl 1212(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1216(%esp), %eax
+ movl 1376(%esp), %ebp
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ movl 12(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 120(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 64(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 1136(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 1136(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1204(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1200(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1196(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1192(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1188(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1180(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1176(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1168(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1164(%esp), %ebx
+ movl 1160(%esp), %edi
+ movl 1156(%esp), %esi
+ movl 1152(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1148(%esp), %edx
+ movl 1140(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1144(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 1064(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1064(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 1132(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1128(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1124(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1120(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1116(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1108(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1100(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1096(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1092(%esp), %ebx
+ movl 1088(%esp), %edi
+ movl 1084(%esp), %esi
+ movl 1080(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1076(%esp), %edx
+ movl 1068(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1072(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 992(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1060(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1056(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1052(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1044(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1040(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1036(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1032(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1028(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1024(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1020(%esp), %ebx
+ movl 1016(%esp), %edi
+ movl 1012(%esp), %esi
+ movl 1008(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1004(%esp), %edx
+ movl 996(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1000(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 920(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 920(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 988(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 984(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 980(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 976(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 972(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 968(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 964(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 956(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 952(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 948(%esp), %ebx
+ movl 944(%esp), %edi
+ movl 940(%esp), %esi
+ movl 936(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 932(%esp), %edx
+ movl 924(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 928(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 848(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 848(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 916(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 912(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 908(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 904(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 900(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 896(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 892(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 888(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 884(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 880(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 876(%esp), %ebx
+ movl 872(%esp), %edi
+ movl 868(%esp), %esi
+ movl 864(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 860(%esp), %edx
+ movl 852(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 856(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 776(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 776(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 844(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 840(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 836(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 828(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 804(%esp), %ebx
+ movl 800(%esp), %edi
+ movl 796(%esp), %esi
+ movl 792(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 788(%esp), %edx
+ movl 780(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 704(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 772(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 764(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 760(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 748(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 744(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 740(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 736(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 732(%esp), %ebx
+ movl 728(%esp), %edi
+ movl 724(%esp), %esi
+ movl 720(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 716(%esp), %edx
+ movl 708(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 712(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 632(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 700(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 696(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 692(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 688(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 684(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 680(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 676(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 672(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 660(%esp), %ebx
+ movl 656(%esp), %edi
+ movl 652(%esp), %esi
+ movl 648(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 644(%esp), %edx
+ movl 636(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 640(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 560(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 628(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 624(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 620(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 616(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 588(%esp), %ebx
+ movl 584(%esp), %edi
+ movl 580(%esp), %esi
+ movl 576(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 572(%esp), %edx
+ movl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 568(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 44(%edx), %eax
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 488(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 532(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 524(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 520(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 516(%esp), %ebx
+ movl 512(%esp), %edi
+ movl 508(%esp), %esi
+ movl 504(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 500(%esp), %edx
+ movl 492(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 496(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 48(%edx), %eax
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 416(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 472(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 468(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 464(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 460(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 444(%esp), %ebx
+ movl 440(%esp), %edi
+ movl 436(%esp), %esi
+ movl 432(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 428(%esp), %edx
+ movl 420(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 424(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 52(%edx), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 344(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 388(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 372(%esp), %ebx
+ movl 368(%esp), %edi
+ movl 364(%esp), %esi
+ movl 360(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 356(%esp), %edx
+ movl 348(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 352(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 52(%eax)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 56(%edx), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 272(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 320(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 316(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 300(%esp), %ebx
+ movl 296(%esp), %edi
+ movl 292(%esp), %edx
+ movl 288(%esp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 284(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl 280(%esp), %ecx
+ movl 120(%esp), %esi # 4-byte Reload
+ movl 1376(%esp), %ebp
+ movl %esi, 56(%ebp)
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebp
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 60(%edx), %eax
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 200(%esp), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 248(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 244(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 240(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 236(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 232(%esp), %edi
+ movl 228(%esp), %esi
+ movl 224(%esp), %edx
+ movl 220(%esp), %ecx
+ movl 216(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl 204(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 208(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 1376(%esp), %ebx
+ movl %ebp, 60(%ebx)
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 64(%edx), %eax
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 128(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 196(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 192(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 188(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 184(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 164(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 156(%esp), %ebx
+ movl 152(%esp), %edi
+ movl 148(%esp), %esi
+ movl 144(%esp), %edx
+ movl 140(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 64(%eax)
+ movl 68(%esp), %ebp # 4-byte Reload
+ movl %ebp, 68(%eax)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ movl %ebp, 72(%eax)
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 76(%eax)
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 80(%eax)
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 84(%eax)
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 88(%eax)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 92(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 96(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 100(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 104(%eax)
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 108(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 112(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 112(%eax)
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 116(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 120(%eax)
+ movl %ecx, 124(%eax)
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 128(%eax)
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 132(%eax)
+ addl $1356, %esp # imm = 0x54C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end257:
+ .size mcl_fpDbl_sqrPre17L, .Lfunc_end257-mcl_fpDbl_sqrPre17L
+
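+# The routine below is LLVM-generated 32-bit x86 code for a 17-limb (544-bit)
+# Montgomery multiplication. Each round multiplies the accumulator by one
+# 32-bit limb of the second operand via .LmulPv544x32, derives the per-round
+# reduction factor with imull against the constant cached from -4(%eax)
+# (presumably -p^{-1} mod 2^32, stored just below the modulus), multiplies the
+# modulus by that factor with another .LmulPv544x32 call, and folds the result
+# into the stack-held accumulator through the long adcl chains.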
+ .globl mcl_fp_mont17L
+ .align 16, 0x90
+ .type mcl_fp_mont17L,@function
+mcl_fp_mont17L: # @mcl_fp_mont17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2588, %esp # imm = 0xA1C
+ calll .L258$pb
+.L258$pb:
+ popl %ebx
+.Ltmp59:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp59-.L258$pb), %ebx
+ movl 2620(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 2512(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 2512(%esp), %ebp
+ movl 2516(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 2580(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 2576(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 2572(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 2568(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 2564(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2560(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2556(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2552(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2548(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2544(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2540(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2536(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2532(%esp), %edi
+ movl 2528(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2524(%esp), %esi
+ movl 2520(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 2440(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ addl 2440(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2444(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 2452(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2456(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2460(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2464(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2468(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2472(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2480(%esp), %eax
+ movl %eax, %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2484(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2488(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2492(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2496(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2500(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 2504(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 2508(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl 2616(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 2368(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ addl 2368(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2372(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2376(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2380(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2384(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 2392(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2396(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2400(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 2404(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 2408(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2412(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2416(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2420(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2424(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 2428(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 2432(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 2436(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2296(%esp), %ecx
+ movl 2620(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ movl 116(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 2296(%esp), %ebp
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 2300(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 2304(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2308(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2312(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2316(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 2320(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2324(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2328(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2332(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 2336(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 2340(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2344(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 2348(%esp), %esi
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 2352(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 2356(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 2360(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 2364(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 2616(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 2224(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 2224(%esp), %ecx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2228(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2232(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2236(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2240(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2244(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2256(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2268(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 2272(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2276(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 2280(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 2284(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 2288(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl 2292(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2152(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 2152(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2156(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2164(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2168(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2172(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2176(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2180(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2184(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2188(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2192(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2196(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2200(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 2204(%esp), %ebp
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl 2208(%esp), %edi
+ movl 132(%esp), %esi # 4-byte Reload
+ adcl 2212(%esp), %esi
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2216(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2220(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 2080(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 2080(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2084(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2088(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2092(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2096(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2100(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2104(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2108(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2112(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2116(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2120(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2124(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 2128(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ adcl 2132(%esp), %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ adcl 2136(%esp), %esi
+ movl %esi, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2140(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2144(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 2148(%esp), %esi
+ sbbl %ebp, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2008(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl %ebp, %eax
+ andl $1, %eax
+ addl 2008(%esp), %edi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2012(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2016(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2020(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2024(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2028(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2032(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2036(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 2040(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 2044(%esp), %edi
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2048(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 2052(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 2056(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 2060(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 2064(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 2068(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 2072(%esp), %ebp
+ adcl 2076(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1936(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 1936(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1944(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1948(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1956(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1964(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1968(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1972(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1976(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1980(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1984(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1988(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1992(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 1996(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2000(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2004(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1864(%esp), %ecx
+ movl 2620(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1864(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1868(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1872(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1876(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1880(%esp), %edi
+ adcl 1884(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1888(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1892(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 1896(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1900(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1904(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1908(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1912(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl 1916(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1920(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1924(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1928(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1932(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1792(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1792(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1804(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1820(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1836(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl 1840(%esp), %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1844(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 1848(%esp), %edi
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1852(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1856(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1860(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1720(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1720(%esp), %ecx
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1724(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1728(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1732(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1736(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1740(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1744(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1748(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1752(%esp), %esi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1756(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1760(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1764(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1768(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1772(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ adcl 1776(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ adcl 1780(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1784(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 1788(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1648(%esp), %ecx
+ movl 2612(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ movl 92(%esp), %eax # 4-byte Reload
+ addl 1648(%esp), %eax
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1652(%esp), %edi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1656(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1660(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1664(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1668(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1672(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1676(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1680(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1684(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1688(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1692(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1696(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1700(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1704(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1708(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl 1712(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1716(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %eax, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1576(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1576(%esp), %ebp
+ adcl 1580(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1584(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 1588(%esp), %ebp
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1592(%esp), %edi
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1596(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1600(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1604(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1608(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1616(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1620(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1624(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1628(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1632(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1636(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1640(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1644(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1504(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1504(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1512(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl 1516(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 1520(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl 1540(%esp), %edi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1544(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1548(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1568(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1572(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1432(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 76(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1432(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1436(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1440(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1448(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1452(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1460(%esp), %ebp
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1464(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl 1468(%esp), %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1472(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %edi # 4-byte Reload
+ adcl 1476(%esp), %edi
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1480(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1484(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1488(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1492(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 1496(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1500(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 1360(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 1360(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1384(%esp), %ebp
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 1400(%esp), %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1408(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1288(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 68(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1288(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1296(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1300(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1304(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1308(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1312(%esp), %ebp
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1316(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1320(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1328(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 1336(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl 1340(%esp), %edi
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1344(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1348(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1352(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1356(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 1216(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 1216(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1236(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 1240(%esp), %ebp
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1244(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1264(%esp), %edi
+ movl %edi, 112(%esp) # 4-byte Spill
+ adcl 1268(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 1144(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1148(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1152(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1156(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 1160(%esp), %edi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1164(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 1168(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1172(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1176(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %esi # 4-byte Reload
+ adcl 1180(%esp), %esi
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1184(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1188(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1192(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1196(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1200(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1204(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1208(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1212(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 1072(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 1072(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 1080(%esp), %ebp
+ adcl 1084(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 1104(%esp), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1112(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1000(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 72(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1000(%esp), %edi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1004(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1008(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1012(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1016(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1020(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1024(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl 1028(%esp), %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1032(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 1036(%esp), %edi
+ adcl 1040(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1044(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1048(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1052(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 1056(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1060(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1064(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl 952(%esp), %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 960(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 980(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 984(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 856(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 856(%esp), %ebp
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 888(%esp), %ebp
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 896(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 912(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2616(%esp), %ecx
+ movl %ecx, %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 2612(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 784(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 812(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 820(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 828(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 712(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %esi # 4-byte Reload
+ adcl 728(%esp), %esi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 736(%esp), %ebp
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 756(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 104(%esp), %ecx # 4-byte Reload
+ addl 640(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 652(%esp), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 660(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 680(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 696(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 104(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 568(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 600(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 608(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 616(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 624(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 108(%esp), %ecx # 4-byte Reload
+ addl 496(%esp), %ecx
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 524(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 528(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 540(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 544(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 108(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 424(%esp), %esi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %esi # 4-byte Reload
+ adcl 440(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 456(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 472(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 480(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 124(%esp), %ecx # 4-byte Reload
+ addl 352(%esp), %ecx
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 364(%esp), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 372(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 404(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 280(%esp), %ebp
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %esi # 4-byte Reload
+ adcl 288(%esp), %esi
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 296(%esp), %ebp
+ adcl 300(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 308(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 64(%eax), %eax
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 128(%esp), %ecx # 4-byte Reload
+ addl 208(%esp), %ecx
+ adcl 212(%esp), %esi
+ movl %esi, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 220(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 224(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 232(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 2620(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ andl $1, %edi
+ addl 136(%esp), %esi
+ movl 116(%esp), %edx # 4-byte Reload
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 148(%esp), %edx
+ movl %edx, 116(%esp) # 4-byte Spill
+ adcl 152(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 164(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 168(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 172(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 176(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 180(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 184(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 188(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 192(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 196(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 200(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 204(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl $0, %edi
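+# Descriptive note (added; not part of the generated output): %edi now holds
+# the final carry of the accumulation above. The sequence below subtracts the
+# modulus (addressed through %ebx = 2620(%esp)) from the accumulated limbs
+# once, and the .LBB258_* branches keep either the subtracted or the original
+# limbs, i.e. the standard final conditional subtraction of a Montgomery
+# multiplication.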
+ movl 132(%esp), %ecx # 4-byte Reload
+ movl 2620(%esp), %ebx
+ subl (%ebx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 8(%ebx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ sbbl 12(%ebx), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %eax, %edx
+ sbbl 16(%ebx), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ sbbl 20(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ sbbl 24(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 28(%ebx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ sbbl 32(%ebx), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ sbbl 36(%ebx), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ sbbl 40(%ebx), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ sbbl 44(%ebx), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ sbbl 48(%ebx), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ sbbl 52(%ebx), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ sbbl 56(%ebx), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ movl 108(%esp), %ebx # 4-byte Reload
+ sbbl 60(%ebp), %ebx
+ movl 124(%esp), %esi # 4-byte Reload
+ sbbl 64(%ebp), %esi
+ movl %esi, %ebp
+ sbbl $0, %edi
+ andl $1, %edi
+ jne .LBB258_2
+# BB#1:
+ movl %ebx, 108(%esp) # 4-byte Spill
+.LBB258_2:
+ movl %edi, %ebx
+ testb %bl, %bl
+ movl 132(%esp), %ebx # 4-byte Reload
+ jne .LBB258_4
+# BB#3:
+ movl 12(%esp), %ebx # 4-byte Reload
+.LBB258_4:
+ movl 2608(%esp), %eax
+ movl %ebx, (%eax)
+ movl 120(%esp), %ebx # 4-byte Reload
+ jne .LBB258_6
+# BB#5:
+ movl 16(%esp), %ebx # 4-byte Reload
+.LBB258_6:
+ movl %ebx, 4(%eax)
+ jne .LBB258_8
+# BB#7:
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 116(%esp) # 4-byte Spill
+.LBB258_8:
+ movl 116(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ jne .LBB258_10
+# BB#9:
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 100(%esp) # 4-byte Spill
+.LBB258_10:
+ movl 100(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 112(%esp), %esi # 4-byte Reload
+ jne .LBB258_12
+# BB#11:
+ movl 28(%esp), %esi # 4-byte Reload
+.LBB258_12:
+ movl %esi, 16(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ jne .LBB258_14
+# BB#13:
+ movl 32(%esp), %edx # 4-byte Reload
+.LBB258_14:
+ movl %edx, 20(%eax)
+ jne .LBB258_16
+# BB#15:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB258_16:
+ movl %ecx, 24(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ jne .LBB258_18
+# BB#17:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB258_18:
+ movl %ecx, 28(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ jne .LBB258_20
+# BB#19:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB258_20:
+ movl %ecx, 32(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB258_22
+# BB#21:
+ movl 48(%esp), %ecx # 4-byte Reload
+.LBB258_22:
+ movl %ecx, 36(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB258_24
+# BB#23:
+ movl 52(%esp), %ecx # 4-byte Reload
+.LBB258_24:
+ movl %ecx, 40(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB258_26
+# BB#25:
+ movl 56(%esp), %ecx # 4-byte Reload
+.LBB258_26:
+ movl %ecx, 44(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ jne .LBB258_28
+# BB#27:
+ movl 60(%esp), %ecx # 4-byte Reload
+.LBB258_28:
+ movl %ecx, 48(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ jne .LBB258_30
+# BB#29:
+ movl 88(%esp), %ecx # 4-byte Reload
+.LBB258_30:
+ movl %ecx, 52(%eax)
+ movl 104(%esp), %ecx # 4-byte Reload
+ jne .LBB258_32
+# BB#31:
+ movl 128(%esp), %ecx # 4-byte Reload
+.LBB258_32:
+ movl %ecx, 56(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl 124(%esp), %ecx # 4-byte Reload
+ jne .LBB258_34
+# BB#33:
+ movl %ebp, %ecx
+.LBB258_34:
+ movl %ecx, 64(%eax)
+ addl $2588, %esp # imm = 0xA1C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end258:
+ .size mcl_fp_mont17L, .Lfunc_end258-mcl_fp_mont17L
+
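+# Descriptive note (added; not part of the generated output): like
+# mcl_fp_mont17L above, mcl_fp_montNF17L below is a 17-limb (17 x 32-bit =
+# 544-bit) Montgomery multiplication. It is built from repeated calls to
+# .LmulPv544x32, one per 32-bit word of the second operand, each interleaved
+# with a reduction pass driven by the word constant read from -4(modulus).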
+ .globl mcl_fp_montNF17L
+ .align 16, 0x90
+ .type mcl_fp_montNF17L,@function
+mcl_fp_montNF17L: # @mcl_fp_montNF17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2572, %esp # imm = 0xA0C
+ calll .L259$pb
+.L259$pb:
+ popl %ebx
+.Ltmp60:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp60-.L259$pb), %ebx
+ movl 2604(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
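+# Descriptive note (added): %esi was loaded from -4(modulus) and is spilled
+# to 48(%esp); each reduction step below computes the per-iteration
+# multiplier with "imull 48(%esp), %eax" and feeds it, together with the
+# modulus pointer in %edx, to the next .LmulPv544x32 call.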
+ movl 2600(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 2496(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 2496(%esp), %edi
+ movl 2500(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 2564(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 2560(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2556(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 2552(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2548(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2544(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2540(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2536(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2532(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 2528(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 2524(%esp), %ebp
+ movl 2520(%esp), %esi
+ movl 2516(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2512(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2508(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2504(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 2424(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 2424(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2428(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2432(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2436(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2440(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2444(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 2448(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 2452(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 2456(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2460(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2464(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2468(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 2472(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2476(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 2480(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2484(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2488(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2492(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 2352(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 2420(%esp), %ecx
+ movl 112(%esp), %edx # 4-byte Reload
+ addl 2352(%esp), %edx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2356(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2360(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2368(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2372(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2376(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 2380(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2384(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2388(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2392(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 2396(%esp), %esi
+ movl %esi, %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2400(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2404(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2408(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2412(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2416(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2280(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 2280(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2284(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2288(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2292(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2296(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2300(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2304(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2308(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2312(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 2316(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2320(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 2324(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2332(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2336(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 2340(%esp), %ebp
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 2344(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2348(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 2208(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 2276(%esp), %eax
+ movl 92(%esp), %edx # 4-byte Reload
+ addl 2208(%esp), %edx
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2212(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2216(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 2220(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2224(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 2228(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 2232(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 2236(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 2240(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2244(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2248(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2252(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 2256(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 2260(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 2264(%esp), %ebp
+ adcl 2268(%esp), %edi
+ movl %edi, %esi
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 2272(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2136(%esp), %ecx
+ movl 2604(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ addl 2136(%esp), %edi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2140(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2144(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2148(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2152(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2156(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2160(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2164(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2168(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2172(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2176(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2180(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2184(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 2188(%esp), %edi
+ adcl 2192(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ adcl 2196(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 2200(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2204(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 2064(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 2132(%esp), %eax
+ movl 104(%esp), %edx # 4-byte Reload
+ addl 2064(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2068(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 2072(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2076(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 2080(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 2084(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 2088(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2092(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2096(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2100(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2104(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 2108(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 2112(%esp), %edi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 2116(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 2120(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 2124(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2128(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1992(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1992(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1996(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2000(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2004(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2008(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2012(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 2016(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2020(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2024(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2028(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2032(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2036(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 2040(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 2044(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2048(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2052(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 2056(%esp), %edi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2060(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1920(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1988(%esp), %eax
+ movl 76(%esp), %edx # 4-byte Reload
+ addl 1920(%esp), %edx
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1924(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1928(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1932(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1936(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1940(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1944(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1948(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1952(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1956(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1960(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1964(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 1968(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1972(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 1976(%esp), %esi
+ adcl 1980(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1984(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1848(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1848(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1852(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1856(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1860(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1864(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1868(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1872(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1876(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1880(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1884(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1888(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1892(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1896(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1900(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 1904(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1908(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1912(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1916(%esp), %eax
+ movl %eax, %edi
+ movl 2600(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1776(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1844(%esp), %eax
+ movl 84(%esp), %edx # 4-byte Reload
+ addl 1776(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1780(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1784(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1788(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1792(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1796(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1800(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1804(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1808(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1812(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1816(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1820(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1824(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1828(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl 1832(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1836(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1840(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1704(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1704(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1708(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1712(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1716(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1720(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1724(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1728(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1732(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1736(%esp), %esi
+ movl %esi, %ebp
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 1740(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1744(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1756(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1768(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1772(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1632(%esp), %ecx
+ movl 2596(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ movl 1700(%esp), %eax
+ movl 80(%esp), %edx # 4-byte Reload
+ addl 1632(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1636(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1640(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1644(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1648(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1652(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1656(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1660(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl 1664(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1668(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1672(%esp), %esi
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1676(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1680(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1684(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1688(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1692(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1696(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1560(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1560(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1564(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1568(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1576(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1580(%esp), %edi
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 1584(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1588(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1592(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1596(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1600(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1604(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 1608(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1616(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1620(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1624(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1628(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1488(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1556(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1488(%esp), %ecx
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1492(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1496(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 1500(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 1504(%esp), %edi
+ adcl 1508(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1512(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 1516(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 1520(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1524(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %edx # 4-byte Reload
+ adcl 1528(%esp), %edx
+ movl %edx, 116(%esp) # 4-byte Spill
+ adcl 1532(%esp), %esi
+ movl %esi, %ebp
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1536(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1540(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1544(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1548(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1552(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1416(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1416(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1428(%esp), %esi
+ adcl 1432(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1436(%esp), %edi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1444(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1448(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 1460(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1472(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1484(%esp), %ebp
+ movl 2600(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 1344(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1412(%esp), %eax
+ movl 52(%esp), %edx # 4-byte Reload
+ addl 1344(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1348(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 1352(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1356(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1360(%esp), %edi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1364(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1368(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1372(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1376(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1380(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1384(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1388(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1392(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1396(%esp), %esi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1400(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1404(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 1408(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1272(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1272(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1288(%esp), %edi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1304(%esp), %ebp
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1312(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1320(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1324(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 1200(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1268(%esp), %eax
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 1200(%esp), %ecx
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 1204(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 1208(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 1212(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1216(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 1220(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1224(%esp), %esi
+ adcl 1228(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 1232(%esp), %edi
+ movl 112(%esp), %edx # 4-byte Reload
+ adcl 1236(%esp), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1240(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1244(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1248(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1252(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1256(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 1260(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1264(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1128(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1128(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1152(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1160(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 1172(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1180(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1188(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 1056(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1124(%esp), %edx
+ movl 68(%esp), %eax # 4-byte Reload
+ addl 1056(%esp), %eax
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1060(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1064(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1072(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1076(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1084(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1088(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1092(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1096(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1100(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 1104(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1108(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 1112(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 1116(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1120(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 984(%esp), %esi
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 988(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 996(%esp), %ebp
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1044(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 912(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 980(%esp), %eax
+ addl 912(%esp), %esi
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 916(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 920(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 924(%esp), %edi
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 928(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 932(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %edx # 4-byte Reload
+ adcl 936(%esp), %edx
+ movl %edx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %edx # 4-byte Reload
+ adcl 940(%esp), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 944(%esp), %ebp
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 948(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 952(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 956(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 960(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 964(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 968(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 972(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 976(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 840(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 840(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 864(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 872(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 888(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 768(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 836(%esp), %edx
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 768(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 788(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl 792(%esp), %edi
+ adcl 796(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 812(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 828(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 696(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 716(%esp), %esi
+ adcl 720(%esp), %edi
+ movl %edi, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 732(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 756(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 692(%esp), %edx
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 624(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 636(%esp), %ebp
+ adcl 640(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 656(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 660(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 552(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 564(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 576(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 588(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 600(%esp), %ebp
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 604(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 548(%esp), %edx
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 480(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 500(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 524(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 528(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 408(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 420(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 432(%esp), %ebp
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 436(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 336(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 404(%esp), %edx
+ movl 108(%esp), %ecx # 4-byte Reload
+ addl 336(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 344(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 356(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ adcl 360(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 364(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 372(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 264(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 280(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 300(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 308(%esp), %edi
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 312(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 64(%eax), %eax
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 260(%esp), %edx
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 192(%esp), %ecx
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 204(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 232(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 236(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 120(%esp), %esi
+ movl 92(%esp), %esi # 4-byte Reload
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 128(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ adcl 132(%esp), %esi
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 148(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 152(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 156(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 160(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 164(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 168(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 172(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 176(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 180(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 184(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 188(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 2604(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %ebp
+ movl %esi, %ebx
+ sbbl 8(%edi), %ebx
+ movl 104(%esp), %ecx # 4-byte Reload
+ sbbl 12(%edi), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 16(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ sbbl 20(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ sbbl 24(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 44(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 48(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 52(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ sbbl 56(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 60(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 64(%edi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
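+# A hedged reading of the selection code that follows: the most significant
+# word of (result - p), computed by the sbbl chain above, is sign-tested;
+# when that word is negative the js branches keep the unsubtracted limbs,
+# otherwise the reduced limbs (result - p) are chosen, and the selected
+# 17 words are stored to the output pointer held in 2592(%esp).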
+ sarl $31, %eax
+ testl %eax, %eax
+ movl 116(%esp), %edi # 4-byte Reload
+ js .LBB259_2
+# BB#1:
+ movl %edx, %edi
+.LBB259_2:
+ movl 2592(%esp), %edx
+ movl %edi, (%edx)
+ movl 112(%esp), %edi # 4-byte Reload
+ js .LBB259_4
+# BB#3:
+ movl %ebp, %edi
+.LBB259_4:
+ movl %edi, 4(%edx)
+ js .LBB259_6
+# BB#5:
+ movl %ebx, %esi
+.LBB259_6:
+ movl %esi, 8(%edx)
+ movl 104(%esp), %esi # 4-byte Reload
+ js .LBB259_8
+# BB#7:
+ movl %ecx, %esi
+.LBB259_8:
+ movl %esi, 12(%edx)
+ movl 76(%esp), %ecx # 4-byte Reload
+ js .LBB259_10
+# BB#9:
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB259_10:
+ movl %ecx, 16(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB259_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB259_12:
+ movl %eax, 20(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB259_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB259_14:
+ movl %eax, 24(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB259_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB259_16:
+ movl %eax, 28(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB259_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB259_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB259_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB259_20:
+ movl %eax, 36(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ js .LBB259_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB259_22:
+ movl %eax, 40(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB259_24
+# BB#23:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB259_24:
+ movl %eax, 44(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB259_26
+# BB#25:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB259_26:
+ movl %eax, 48(%edx)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB259_28
+# BB#27:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB259_28:
+ movl %eax, 52(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB259_30
+# BB#29:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB259_30:
+ movl %eax, 56(%edx)
+ movl 108(%esp), %eax # 4-byte Reload
+ js .LBB259_32
+# BB#31:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB259_32:
+ movl %eax, 60(%edx)
+ movl 100(%esp), %eax # 4-byte Reload
+ js .LBB259_34
+# BB#33:
+ movl 92(%esp), %eax # 4-byte Reload
+.LBB259_34:
+ movl %eax, 64(%edx)
+ addl $2572, %esp # imm = 0xA0C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end259:
+ .size mcl_fp_montNF17L, .Lfunc_end259-mcl_fp_montNF17L
+
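+# A hedged sketch of the routine below: mcl_fp_montRed17L appears to be a
+# word-serial Montgomery reduction of a 34-limb (1088-bit) input modulo the
+# 17-limb modulus addressed through 1464(%esp). Each round computes
+#     q = (low limb of the accumulator) * rp  (mod 2^32)
+# where rp is the per-word constant loaded from -4 off the modulus pointer
+# and kept in 96(%esp) (in the usual Montgomery convention, -p^-1 mod 2^32),
+# calls .LmulPv544x32 to multiply the 17-limb modulus by the 32-bit q, and
+# adds the product into the accumulator so the low limb cancels, which
+# effectively divides by 2^32 before the next round.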
+ .globl mcl_fp_montRed17L
+ .align 16, 0x90
+ .type mcl_fp_montRed17L,@function
+mcl_fp_montRed17L: # @mcl_fp_montRed17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1436, %esp # imm = 0x59C
+ calll .L260$pb
+.L260$pb:
+ popl %eax
+.Ltmp61:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp61-.L260$pb), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1464(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 1460(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 132(%ecx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 128(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 124(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 112(%ecx), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 108(%ecx), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 100(%ecx), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 92(%ecx), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl %esi, 180(%esp) # 4-byte Spill
+ movl 80(%ecx), %edi
+ movl %edi, 196(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl %esi, 192(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 204(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 64(%ecx), %ebp
+ movl %ebp, 176(%esp) # 4-byte Spill
+ movl 60(%ecx), %ebp
+ movl %ebp, 164(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 52(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 20(%ecx), %ebp
+ movl 16(%ecx), %esi
+ movl 12(%ecx), %edi
+ movl 8(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 64(%edx), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 1360(%esp), %ecx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 76(%esp), %eax # 4-byte Reload
+ addl 1360(%esp), %eax
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1364(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1372(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 1376(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 1380(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ adcl $0, 204(%esp) # 4-byte Folded Spill
+ adcl $0, 192(%esp) # 4-byte Folded Spill
+ adcl $0, 196(%esp) # 4-byte Folded Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ movl 128(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1288(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 1288(%esp), %esi
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1292(%esp), %edx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1312(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1320(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1324(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 1336(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ adcl $0, 192(%esp) # 4-byte Folded Spill
+ adcl $0, 196(%esp) # 4-byte Folded Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, %esi
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 128(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1216(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1216(%esp), %ebp
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1244(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl 1260(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %edi # 4-byte Reload
+ adcl 1264(%esp), %edi
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ adcl $0, 196(%esp) # 4-byte Folded Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 184(%esp) # 4-byte Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1144(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1148(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl 1188(%esp), %edi
+ movl %edi, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1208(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ movl 188(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 144(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1072(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1072(%esp), %esi
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1076(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 188(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ movl 172(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ movl 152(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1000(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1000(%esp), %esi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1004(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 172(%esp) # 4-byte Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 152(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 928(%esp), %esi
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 932(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ movl 160(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 856(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 856(%esp), %esi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 860(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ adcl 924(%esp), %ebp
+ movl %ebp, 168(%esp) # 4-byte Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 160(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ movl 96(%esp), %ebp # 4-byte Reload
+ imull %ebp, %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 784(%esp), %edi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 788(%esp), %ecx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull %ebp, %eax
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 712(%esp), %esi
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 716(%esp), %ecx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %ebp # 4-byte Reload
+ adcl 760(%esp), %ebp
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl 780(%esp), %edi
+ movl %edi, 156(%esp) # 4-byte Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 640(%esp), %esi
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 644(%esp), %ecx
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %edi # 4-byte Reload
+ adcl 672(%esp), %edi
+ movl 192(%esp), %esi # 4-byte Reload
+ adcl 676(%esp), %esi
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl 684(%esp), %ebp
+ movl %ebp, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 1464(%esp), %eax
+ movl %eax, %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 568(%esp), %ebp
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 572(%esp), %ecx
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %ebp # 4-byte Reload
+ adcl 588(%esp), %ebp
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ adcl 596(%esp), %edi
+ movl %edi, 204(%esp) # 4-byte Spill
+ adcl 600(%esp), %esi
+ movl %esi, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %esi # 4-byte Reload
+ adcl 604(%esp), %esi
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 496(%esp), %edi
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 500(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ adcl 512(%esp), %ebp
+ movl %ebp, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %ebp # 4-byte Reload
+ adcl 516(%esp), %ebp
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ adcl 528(%esp), %esi
+ movl %esi, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %edi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 424(%esp), %edi
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 428(%esp), %ecx
+ movl 164(%esp), %edi # 4-byte Reload
+ adcl 432(%esp), %edi
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ adcl 440(%esp), %ebp
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 352(%esp), %esi
+ movl %edi, %ecx
+ adcl 356(%esp), %ecx
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ adcl 364(%esp), %ebp
+ movl %ebp, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %edi # 4-byte Reload
+ adcl 384(%esp), %edi
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 280(%esp), %ebp
+ movl 176(%esp), %ecx # 4-byte Reload
+ adcl 284(%esp), %ecx
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ adcl 308(%esp), %edi
+ movl %edi, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %ebp
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 208(%esp), %ebp
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %edx # 4-byte Reload
+ adcl 216(%esp), %edx
+ movl %edx, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %ecx # 4-byte Reload
+ adcl 220(%esp), %ecx
+ movl %ecx, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %ebx # 4-byte Reload
+ adcl 264(%esp), %ebx
+ movl %ebx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 276(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 200(%esp), %edi # 4-byte Reload
+ subl 16(%esp), %edi # 4-byte Folded Reload
+ sbbl 4(%esp), %edx # 4-byte Folded Reload
+ sbbl 8(%esp), %ecx # 4-byte Folded Reload
+ movl 196(%esp), %eax # 4-byte Reload
+ sbbl 12(%esp), %eax # 4-byte Folded Reload
+ sbbl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 184(%esp), %esi # 4-byte Reload
+ sbbl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 188(%esp), %esi # 4-byte Reload
+ sbbl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 168(%esp), %esi # 4-byte Reload
+ sbbl 32(%esp), %esi # 4-byte Folded Reload
+ movl 172(%esp), %ebp # 4-byte Reload
+ sbbl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 156(%esp), %ebp # 4-byte Reload
+ sbbl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 160(%esp), %ebp # 4-byte Reload
+ sbbl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 152(%esp), %ebp # 4-byte Reload
+ sbbl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 144(%esp), %ebp # 4-byte Reload
+ sbbl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 136(%esp) # 4-byte Spill
+ sbbl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 140(%esp) # 4-byte Spill
+ movl 132(%esp), %ebx # 4-byte Reload
+ sbbl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 148(%esp) # 4-byte Spill
+ movl 124(%esp), %ebx # 4-byte Reload
+ sbbl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 164(%esp) # 4-byte Spill
+ movl 116(%esp), %ebx # 4-byte Reload
+ sbbl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 176(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx # 4-byte Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB260_2
+# BB#1:
+ movl %esi, 168(%esp) # 4-byte Spill
+.LBB260_2:
+ testb %bl, %bl
+ movl 200(%esp), %esi # 4-byte Reload
+ jne .LBB260_4
+# BB#3:
+ movl %edi, %esi
+.LBB260_4:
+ movl 1456(%esp), %edi
+ movl %esi, (%edi)
+ movl 156(%esp), %esi # 4-byte Reload
+ movl 204(%esp), %ebx # 4-byte Reload
+ jne .LBB260_6
+# BB#5:
+ movl %edx, %ebx
+.LBB260_6:
+ movl %ebx, 4(%edi)
+ movl 144(%esp), %ebx # 4-byte Reload
+ movl 192(%esp), %edx # 4-byte Reload
+ jne .LBB260_8
+# BB#7:
+ movl %ecx, %edx
+.LBB260_8:
+ movl %edx, 8(%edi)
+ movl 132(%esp), %edx # 4-byte Reload
+ movl 196(%esp), %ecx # 4-byte Reload
+ jne .LBB260_10
+# BB#9:
+ movl %eax, %ecx
+.LBB260_10:
+ movl %ecx, 12(%edi)
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl 180(%esp), %eax # 4-byte Reload
+ jne .LBB260_12
+# BB#11:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB260_12:
+ movl %eax, 16(%edi)
+ movl 188(%esp), %eax # 4-byte Reload
+ movl 184(%esp), %ebp # 4-byte Reload
+ jne .LBB260_14
+# BB#13:
+ movl 92(%esp), %ebp # 4-byte Reload
+.LBB260_14:
+ movl %ebp, 20(%edi)
+ movl 152(%esp), %ebp # 4-byte Reload
+ jne .LBB260_16
+# BB#15:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB260_16:
+ movl %eax, 24(%edi)
+ movl 168(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%edi)
+ jne .LBB260_18
+# BB#17:
+ movl 104(%esp), %eax # 4-byte Reload
+ movl %eax, 172(%esp) # 4-byte Spill
+.LBB260_18:
+ movl 172(%esp), %eax # 4-byte Reload
+ movl %eax, 32(%edi)
+ jne .LBB260_20
+# BB#19:
+ movl 108(%esp), %esi # 4-byte Reload
+.LBB260_20:
+ movl %esi, 36(%edi)
+ jne .LBB260_22
+# BB#21:
+ movl 112(%esp), %eax # 4-byte Reload
+ movl %eax, 160(%esp) # 4-byte Spill
+.LBB260_22:
+ movl 160(%esp), %esi # 4-byte Reload
+ movl %esi, 40(%edi)
+ movl 128(%esp), %eax # 4-byte Reload
+ jne .LBB260_24
+# BB#23:
+ movl 120(%esp), %ebp # 4-byte Reload
+.LBB260_24:
+ movl %ebp, 44(%edi)
+ jne .LBB260_26
+# BB#25:
+ movl 136(%esp), %ebx # 4-byte Reload
+.LBB260_26:
+ movl %ebx, 48(%edi)
+ jne .LBB260_28
+# BB#27:
+ movl 140(%esp), %eax # 4-byte Reload
+.LBB260_28:
+ movl %eax, 52(%edi)
+ jne .LBB260_30
+# BB#29:
+ movl 148(%esp), %edx # 4-byte Reload
+.LBB260_30:
+ movl %edx, 56(%edi)
+ movl 116(%esp), %eax # 4-byte Reload
+ jne .LBB260_32
+# BB#31:
+ movl 164(%esp), %ecx # 4-byte Reload
+.LBB260_32:
+ movl %ecx, 60(%edi)
+ jne .LBB260_34
+# BB#33:
+ movl 176(%esp), %eax # 4-byte Reload
+.LBB260_34:
+ movl %eax, 64(%edi)
+ addl $1436, %esp # imm = 0x59C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end260:
+ .size mcl_fp_montRed17L, .Lfunc_end260-mcl_fp_montRed17L
+
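+# mcl_fp_addPre17L: adds two 17x32-bit (544-bit) operands limb by limb with
+# carry propagation, stores the 17-limb sum at the destination pointer and
+# returns the final carry bit in %eax. No modular reduction is performed.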
+ .globl mcl_fp_addPre17L
+ .align 16, 0x90
+ .type mcl_fp_addPre17L,@function
+mcl_fp_addPre17L: # @mcl_fp_addPre17L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %ebx
+ adcl 8(%ecx), %ebx
+ movl 16(%esp), %edi
+ movl %edx, (%edi)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%edi)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %ebx, 8(%edi)
+ movl 20(%eax), %ebx
+ movl %edx, 12(%edi)
+ movl 20(%ecx), %edx
+ adcl %ebx, %edx
+ movl 24(%eax), %ebx
+ movl %esi, 16(%edi)
+ movl 24(%ecx), %esi
+ adcl %ebx, %esi
+ movl 28(%eax), %ebx
+ movl %edx, 20(%edi)
+ movl 28(%ecx), %edx
+ adcl %ebx, %edx
+ movl 32(%eax), %ebx
+ movl %esi, 24(%edi)
+ movl 32(%ecx), %esi
+ adcl %ebx, %esi
+ movl 36(%eax), %ebx
+ movl %edx, 28(%edi)
+ movl 36(%ecx), %edx
+ adcl %ebx, %edx
+ movl 40(%eax), %ebx
+ movl %esi, 32(%edi)
+ movl 40(%ecx), %esi
+ adcl %ebx, %esi
+ movl 44(%eax), %ebx
+ movl %edx, 36(%edi)
+ movl 44(%ecx), %edx
+ adcl %ebx, %edx
+ movl 48(%eax), %ebx
+ movl %esi, 40(%edi)
+ movl 48(%ecx), %esi
+ adcl %ebx, %esi
+ movl 52(%eax), %ebx
+ movl %edx, 44(%edi)
+ movl 52(%ecx), %edx
+ adcl %ebx, %edx
+ movl 56(%eax), %ebx
+ movl %esi, 48(%edi)
+ movl 56(%ecx), %esi
+ adcl %ebx, %esi
+ movl 60(%eax), %ebx
+ movl %edx, 52(%edi)
+ movl 60(%ecx), %edx
+ adcl %ebx, %edx
+ movl %esi, 56(%edi)
+ movl %edx, 60(%edi)
+ movl 64(%eax), %eax
+ movl 64(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 64(%edi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end261:
+ .size mcl_fp_addPre17L, .Lfunc_end261-mcl_fp_addPre17L
+
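+# mcl_fp_subPre17L: subtracts the third 17-limb operand from the second with
+# borrow propagation, stores the difference at the destination pointer and
+# returns the final borrow bit in %eax. No modular reduction is performed.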
+ .globl mcl_fp_subPre17L
+ .align 16, 0x90
+ .type mcl_fp_subPre17L,@function
+mcl_fp_subPre17L: # @mcl_fp_subPre17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebp
+ sbbl 8(%edx), %ebp
+ movl 20(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebx)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebp, 8(%ebx)
+ movl 20(%edx), %ebp
+ movl %esi, 12(%ebx)
+ movl 20(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 24(%edx), %ebp
+ movl %edi, 16(%ebx)
+ movl 24(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 28(%edx), %ebp
+ movl %esi, 20(%ebx)
+ movl 28(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 32(%edx), %ebp
+ movl %edi, 24(%ebx)
+ movl 32(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 36(%edx), %ebp
+ movl %esi, 28(%ebx)
+ movl 36(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 40(%edx), %ebp
+ movl %edi, 32(%ebx)
+ movl 40(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 44(%edx), %ebp
+ movl %esi, 36(%ebx)
+ movl 44(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 48(%edx), %ebp
+ movl %edi, 40(%ebx)
+ movl 48(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 52(%edx), %ebp
+ movl %esi, 44(%ebx)
+ movl 52(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 56(%edx), %ebp
+ movl %edi, 48(%ebx)
+ movl 56(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 60(%edx), %ebp
+ movl %esi, 52(%ebx)
+ movl 60(%ecx), %esi
+ sbbl %ebp, %esi
+ movl %edi, 56(%ebx)
+ movl %esi, 60(%ebx)
+ movl 64(%edx), %edx
+ movl 64(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 64(%ebx)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end262:
+ .size mcl_fp_subPre17L, .Lfunc_end262-mcl_fp_subPre17L
+
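+# mcl_fp_shr1_17L: right-shifts a 17-limb value by one bit, using a chain of
+# shrdl instructions so the low bit of each higher limb moves into the top bit
+# of the limb below it.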
+ .globl mcl_fp_shr1_17L
+ .align 16, 0x90
+ .type mcl_fp_shr1_17L,@function
+mcl_fp_shr1_17L: # @mcl_fp_shr1_17L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 48(%ecx)
+ movl 56(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 52(%ecx)
+ movl 60(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 56(%ecx)
+ movl 64(%eax), %eax
+ shrdl $1, %eax, %esi
+ movl %esi, 60(%ecx)
+ shrl %eax
+ movl %eax, 64(%ecx)
+ popl %esi
+ retl
+.Lfunc_end263:
+ .size mcl_fp_shr1_17L, .Lfunc_end263-mcl_fp_shr1_17L
+
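+# mcl_fp_add17L: addition of two 17-limb operands with a single conditional
+# reduction. The raw sum is written out first; the fourth argument (presumably
+# the field modulus p) is then subtracted once and, if no borrow occurred, the
+# reduced result overwrites the sum.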
+ .globl mcl_fp_add17L
+ .align 16, 0x90
+ .type mcl_fp_add17L,@function
+mcl_fp_add17L: # @mcl_fp_add17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 88(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ movl 84(%esp), %edx
+ addl (%edx), %ecx
+ movl %ecx, %ebx
+ adcl 4(%edx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ adcl 8(%edx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl 16(%edx), %edi
+ adcl 12(%esi), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ adcl 20(%esi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ adcl 24(%esi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ adcl 28(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ adcl 32(%esi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 36(%edx), %eax
+ adcl 36(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%edx), %eax
+ adcl 40(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%edx), %eax
+ adcl 44(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 48(%edx), %eax
+ adcl 48(%esi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ adcl 52(%esi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 56(%edx), %eax
+ adcl 56(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 60(%edx), %ebp
+ adcl 60(%esi), %ebp
+ movl 64(%edx), %edx
+ adcl 64(%esi), %edx
+ movl 80(%esp), %esi
+ movl %ebx, (%esi)
+ movl %ebx, %eax
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%esi)
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%esi)
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%esi)
+ movl %edi, 16(%esi)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%esi)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%esi)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%esi)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%esi)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%esi)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%esi)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%esi)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 48(%esi)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 52(%esi)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 56(%esi)
+ movl %ebp, 60(%esi)
+ movl %edx, 64(%esi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 92(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ sbbl 8(%edi), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 12(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 4(%esp), %eax # 4-byte Reload
+ sbbl 16(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ sbbl 20(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 24(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ sbbl 44(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ sbbl 48(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 52(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ sbbl 56(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 60(%edi), %ebp
+ sbbl 64(%edi), %edx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB264_2
+# BB#1: # %nocarry
+ movl (%esp), %edi # 4-byte Reload
+ movl %edi, (%esi)
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%esi)
+ movl 56(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%esi)
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%esi)
+ movl 4(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%esi)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%esi)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%esi)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%esi)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%esi)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%esi)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%esi)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%esi)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 48(%esi)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%esi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%esi)
+ movl %ebp, 60(%esi)
+ movl %edx, 64(%esi)
+.LBB264_2: # %carry
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end264:
+ .size mcl_fp_add17L, .Lfunc_end264-mcl_fp_add17L
+
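+# mcl_fp_addNF17L: addition variant that computes both x+y and x+y-p into
+# spill slots, then selects per limb on the sign of the top borrow word
+# (sarl $31 / js), instead of the carry-flag branch used by mcl_fp_add17L.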
+ .globl mcl_fp_addNF17L
+ .align 16, 0x90
+ .type mcl_fp_addNF17L,@function
+mcl_fp_addNF17L: # @mcl_fp_addNF17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $132, %esp
+ movl 160(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 156(%esp), %esi
+ addl (%esi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 4(%esi), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 64(%eax), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 60(%eax), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 56(%eax), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 52(%eax), %ebp
+ movl 48(%eax), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 44(%eax), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 40(%eax), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 36(%eax), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 28(%eax), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 20(%eax), %ebx
+ movl 16(%eax), %edi
+ movl 12(%eax), %edx
+ movl 8(%eax), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 12(%esi), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 24(%esi), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 28(%esi), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 32(%esi), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esi), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 40(%esi), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esi), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 48(%esi), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 52(%esi), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 56(%esi), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 60(%esi), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 64(%esi), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 164(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ subl (%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 4(%esi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 8(%esi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%esi), %edx
+ sbbl 16(%esi), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ sbbl 20(%esi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 120(%esp), %ebx # 4-byte Reload
+ sbbl 24(%esi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 28(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 32(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 36(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ sbbl 40(%esi), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ sbbl 44(%esi), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ sbbl 48(%esi), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 52(%esi), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ movl %eax, %ebp
+ sbbl 56(%esi), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 60(%esi), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ movl %eax, %ebx
+ sbbl 64(%esi), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl %ebx, %esi
+ sarl $31, %esi
+ testl %esi, %esi
+ movl 84(%esp), %esi # 4-byte Reload
+ js .LBB265_2
+# BB#1:
+ movl (%esp), %esi # 4-byte Reload
+.LBB265_2:
+ movl 152(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB265_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB265_4:
+ movl %eax, 4(%ebx)
+ movl 108(%esp), %eax # 4-byte Reload
+ movl 76(%esp), %esi # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ js .LBB265_6
+# BB#5:
+ movl 8(%esp), %edi # 4-byte Reload
+.LBB265_6:
+ movl %edi, 8(%ebx)
+ movl 116(%esp), %edi # 4-byte Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ js .LBB265_8
+# BB#7:
+ movl %edx, %ecx
+.LBB265_8:
+ movl %ecx, 12(%ebx)
+ movl 104(%esp), %ecx # 4-byte Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ js .LBB265_10
+# BB#9:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB265_10:
+ movl %edx, 16(%ebx)
+ movl %ebp, %edx
+ js .LBB265_12
+# BB#11:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB265_12:
+ movl %esi, 20(%ebx)
+ movl 112(%esp), %ebp # 4-byte Reload
+ js .LBB265_14
+# BB#13:
+ movl 20(%esp), %esi # 4-byte Reload
+ movl %esi, 120(%esp) # 4-byte Spill
+.LBB265_14:
+ movl 120(%esp), %esi # 4-byte Reload
+ movl %esi, 24(%ebx)
+ js .LBB265_16
+# BB#15:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB265_16:
+ movl %ecx, 28(%ebx)
+ js .LBB265_18
+# BB#17:
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 124(%esp) # 4-byte Spill
+.LBB265_18:
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%ebx)
+ js .LBB265_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB265_20:
+ movl %eax, 36(%ebx)
+ movl 100(%esp), %ecx # 4-byte Reload
+ js .LBB265_22
+# BB#21:
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, 128(%esp) # 4-byte Spill
+.LBB265_22:
+ movl 128(%esp), %eax # 4-byte Reload
+ movl %eax, 40(%ebx)
+ js .LBB265_24
+# BB#23:
+ movl 40(%esp), %ebp # 4-byte Reload
+.LBB265_24:
+ movl %ebp, 44(%ebx)
+ js .LBB265_26
+# BB#25:
+ movl 44(%esp), %edi # 4-byte Reload
+.LBB265_26:
+ movl %edi, 48(%ebx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB265_28
+# BB#27:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB265_28:
+ movl %eax, 52(%ebx)
+ js .LBB265_30
+# BB#29:
+ movl 52(%esp), %edx # 4-byte Reload
+.LBB265_30:
+ movl %edx, 56(%ebx)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB265_32
+# BB#31:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB265_32:
+ movl %eax, 60(%ebx)
+ js .LBB265_34
+# BB#33:
+ movl 60(%esp), %ecx # 4-byte Reload
+.LBB265_34:
+ movl %ecx, 64(%ebx)
+ addl $132, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end265:
+ .size mcl_fp_addNF17L, .Lfunc_end265-mcl_fp_addNF17L
+
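+# mcl_fp_sub17L: subtraction of two 17-limb operands. The raw difference is
+# written out first; if the subtraction borrowed, the fourth argument
+# (presumably the field modulus p) is added back in the carry branch.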
+ .globl mcl_fp_sub17L
+ .align 16, 0x90
+ .type mcl_fp_sub17L,@function
+mcl_fp_sub17L: # @mcl_fp_sub17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+ movl 88(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 92(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ sbbl 32(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ sbbl 40(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ sbbl 44(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 48(%esi), %edx
+ sbbl 48(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 52(%esi), %ecx
+ sbbl 52(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 56(%esi), %eax
+ sbbl 56(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 60(%esi), %ebp
+ sbbl 60(%edi), %ebp
+ movl 64(%esi), %esi
+ sbbl 64(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 84(%esp), %ebx
+ movl 56(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 60(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%ebx)
+ movl %edx, 48(%ebx)
+ movl %ecx, 52(%ebx)
+ movl %eax, 56(%ebx)
+ movl %ebp, 60(%ebx)
+ movl %esi, 64(%ebx)
+ je .LBB266_2
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 96(%esp), %esi
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 8(%esi), %edi
+ movl 12(%esi), %eax
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl 44(%esi), %eax
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%ebx)
+ movl 48(%esi), %ecx
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%ebx)
+ movl 52(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 48(%ebx)
+ movl 56(%esi), %ecx
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 52(%ebx)
+ movl %ecx, 56(%ebx)
+ movl 60(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 60(%ebx)
+ movl 64(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ebx)
+.LBB266_2: # %nocarry
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end266:
+ .size mcl_fp_sub17L, .Lfunc_end266-mcl_fp_sub17L
+
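+# mcl_fp_subNF17L: subtraction variant that builds an all-zeros/all-ones mask
+# from the sign of the top borrow limb (sarl $31), ANDs the modulus limbs with
+# that mask and adds them back, applying the correction without branches.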
+ .globl mcl_fp_subNF17L
+ .align 16, 0x90
+ .type mcl_fp_subNF17L,@function
+mcl_fp_subNF17L: # @mcl_fp_subNF17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $112, %esp
+ movl 136(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 140(%esp), %edi
+ subl (%edi), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ sbbl 4(%edi), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%ecx), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 56(%ecx), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 52(%ecx), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 40(%ecx), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 36(%ecx), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 36(%edi), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ sbbl 40(%edi), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 52(%edi), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ sbbl 56(%edi), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ sbbl 60(%edi), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 64(%edi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ sarl $31, %ebx
+ movl %ebx, %edx
+ shldl $1, %ecx, %edx
+ movl 144(%esp), %eax
+ movl 28(%eax), %ecx
+ andl %edx, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 12(%eax), %ecx
+ andl %edx, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 4(%eax), %ecx
+ andl %edx, %ecx
+ movl %ecx, %esi
+ andl (%eax), %edx
+ movl 64(%eax), %ecx
+ andl %ebx, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 60(%eax), %ecx
+ andl %ebx, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ roll %ebx
+ movl 56(%eax), %ecx
+ andl %ebx, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 52(%eax), %ecx
+ andl %ebx, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 48(%eax), %ecx
+ andl %ebx, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 44(%eax), %ecx
+ andl %ebx, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 40(%eax), %ecx
+ andl %ebx, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 36(%eax), %ecx
+ andl %ebx, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ andl %ebx, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 24(%eax), %ebp
+ andl %ebx, %ebp
+ movl 20(%eax), %edi
+ andl %ebx, %edi
+ movl 16(%eax), %ecx
+ andl %ebx, %ecx
+ andl 8(%eax), %ebx
+ addl 68(%esp), %edx # 4-byte Folded Reload
+ movl %esi, %eax
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl 132(%esp), %esi
+ movl %edx, (%esi)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %eax, 4(%esi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 8(%esi)
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%esi)
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %ecx, 16(%esi)
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 20(%esi)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 24(%esi)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%esi)
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 108(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%esi)
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 92(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%esi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 48(%esi)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 52(%esi)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 56(%esi)
+ movl %eax, 60(%esi)
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esi)
+ addl $112, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end267:
+ .size mcl_fp_subNF17L, .Lfunc_end267-mcl_fp_subNF17L
+
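+# mcl_fpDbl_add17L: adds two 34-limb (double-width) operands. The low 17 limbs
+# of the sum are stored directly; the high half is conditionally reduced by one
+# subtraction of the 17-limb modulus (fourth argument) before being stored.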
+ .globl mcl_fpDbl_add17L
+ .align 16, 0x90
+ .type mcl_fpDbl_add17L,@function
+mcl_fpDbl_add17L: # @mcl_fpDbl_add17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $128, %esp
+ movl 156(%esp), %ecx
+ movl 152(%esp), %edx
+ movl 12(%edx), %edi
+ movl 16(%edx), %esi
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%edx), %ebp
+ movl 148(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%edx), %ebp
+ adcl 8(%edx), %ebx
+ adcl 12(%ecx), %edi
+ adcl 16(%ecx), %esi
+ movl %ebp, 4(%eax)
+ movl 76(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %edi, 12(%eax)
+ movl 20(%edx), %edi
+ adcl %ebx, %edi
+ movl 24(%ecx), %ebx
+ movl %esi, 16(%eax)
+ movl 24(%edx), %esi
+ adcl %ebx, %esi
+ movl 28(%ecx), %ebx
+ movl %edi, 20(%eax)
+ movl 28(%edx), %edi
+ adcl %ebx, %edi
+ movl 32(%ecx), %ebx
+ movl %esi, 24(%eax)
+ movl 32(%edx), %esi
+ adcl %ebx, %esi
+ movl 36(%ecx), %ebx
+ movl %edi, 28(%eax)
+ movl 36(%edx), %edi
+ adcl %ebx, %edi
+ movl 40(%ecx), %ebx
+ movl %esi, 32(%eax)
+ movl 40(%edx), %esi
+ adcl %ebx, %esi
+ movl 44(%ecx), %ebx
+ movl %edi, 36(%eax)
+ movl 44(%edx), %edi
+ adcl %ebx, %edi
+ movl 48(%ecx), %ebx
+ movl %esi, 40(%eax)
+ movl 48(%edx), %esi
+ adcl %ebx, %esi
+ movl 52(%ecx), %ebx
+ movl %edi, 44(%eax)
+ movl 52(%edx), %edi
+ adcl %ebx, %edi
+ movl 56(%ecx), %ebx
+ movl %esi, 48(%eax)
+ movl 56(%edx), %esi
+ adcl %ebx, %esi
+ movl 60(%ecx), %ebx
+ movl %edi, 52(%eax)
+ movl 60(%edx), %edi
+ adcl %ebx, %edi
+ movl 64(%ecx), %ebx
+ movl %esi, 56(%eax)
+ movl 64(%edx), %esi
+ adcl %ebx, %esi
+ movl 68(%ecx), %ebx
+ movl %edi, 60(%eax)
+ movl 68(%edx), %edi
+ adcl %ebx, %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 72(%ecx), %edi
+ movl %esi, 64(%eax)
+ movl 72(%edx), %eax
+ adcl %edi, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 76(%edx), %eax
+ adcl %ebp, %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 80(%ecx), %esi
+ movl 80(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl 84(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 88(%ecx), %esi
+ movl 88(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%ecx), %esi
+ movl 92(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 96(%ecx), %esi
+ movl 96(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 100(%ecx), %esi
+ movl 100(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl 104(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%ecx), %esi
+ movl 108(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 112(%ecx), %esi
+ movl 112(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 116(%ecx), %esi
+ movl 116(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 120(%ecx), %edi
+ movl 120(%edx), %esi
+ adcl %edi, %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 124(%ecx), %ebx
+ movl 124(%edx), %edi
+ adcl %ebx, %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 128(%ecx), %ebx
+ movl 128(%edx), %ebp
+ adcl %ebx, %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 132(%ecx), %ecx
+ movl 132(%edx), %edx
+ adcl %ecx, %edx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl 160(%esp), %ebx
+ movl 92(%esp), %eax # 4-byte Reload
+ subl (%ebx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ sbbl 4(%ebx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 8(%ebx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 12(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 16(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ sbbl 20(%ebx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ sbbl 24(%ebx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 28(%ebx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 32(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 36(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ sbbl 40(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ sbbl 44(%ebx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 48(%ebx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 52(%ebx), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ sbbl 56(%ebx), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ sbbl 60(%ebx), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ sbbl 64(%ebx), %ebp
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB268_2
+# BB#1:
+ movl %ebp, %edx
+.LBB268_2:
+ testb %cl, %cl
+ movl 92(%esp), %eax # 4-byte Reload
+ movl 88(%esp), %esi # 4-byte Reload
+ movl 84(%esp), %edi # 4-byte Reload
+ movl 80(%esp), %ebx # 4-byte Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ jne .LBB268_4
+# BB#3:
+ movl (%esp), %esi # 4-byte Reload
+ movl 4(%esp), %edi # 4-byte Reload
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB268_4:
+ movl 148(%esp), %ecx
+ movl %eax, 68(%ecx)
+ movl %ecx, %eax
+ movl 96(%esp), %ecx # 4-byte Reload
+ movl %ecx, 72(%eax)
+ movl 100(%esp), %ecx # 4-byte Reload
+ movl %ecx, 76(%eax)
+ movl 104(%esp), %ecx # 4-byte Reload
+ movl %ecx, 80(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl %ecx, 84(%eax)
+ movl 112(%esp), %ecx # 4-byte Reload
+ movl %ecx, 88(%eax)
+ movl 116(%esp), %ecx # 4-byte Reload
+ movl %ecx, 92(%eax)
+ movl 120(%esp), %ecx # 4-byte Reload
+ movl %ecx, 96(%eax)
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl %ecx, 100(%eax)
+ movl %ebp, 104(%eax)
+ movl %ebx, 108(%eax)
+ movl %edi, 112(%eax)
+ movl %esi, 116(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ jne .LBB268_6
+# BB#5:
+ movl 52(%esp), %esi # 4-byte Reload
+.LBB268_6:
+ movl %esi, 120(%eax)
+ movl 68(%esp), %esi # 4-byte Reload
+ jne .LBB268_8
+# BB#7:
+ movl 56(%esp), %esi # 4-byte Reload
+.LBB268_8:
+ movl %esi, 124(%eax)
+ jne .LBB268_10
+# BB#9:
+ movl 60(%esp), %ecx # 4-byte Reload
+.LBB268_10:
+ movl %ecx, 128(%eax)
+ movl %edx, 132(%eax)
+ addl $128, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end268:
+ .size mcl_fpDbl_add17L, .Lfunc_end268-mcl_fpDbl_add17L
+
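+# mcl_fpDbl_sub17L: subtracts two 34-limb (double-width) operands. The low 17
+# limbs of the difference are stored directly; if the full subtraction
+# borrowed, the 17-limb modulus (fourth argument) is added to the high half
+# before it is stored.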
+ .globl mcl_fpDbl_sub17L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub17L,@function
+mcl_fpDbl_sub17L: # @mcl_fpDbl_sub17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $116, %esp
+ movl 140(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %edi
+ movl 144(%esp), %esi
+ subl (%esi), %eax
+ sbbl 4(%esi), %edi
+ movl 8(%edx), %ebx
+ sbbl 8(%esi), %ebx
+ movl 136(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%edx), %eax
+ sbbl 12(%esi), %eax
+ movl %edi, 4(%ecx)
+ movl 16(%edx), %edi
+ sbbl 16(%esi), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%esi), %ebx
+ movl %eax, 12(%ecx)
+ movl 20(%edx), %eax
+ sbbl %ebx, %eax
+ movl 24(%esi), %ebx
+ movl %edi, 16(%ecx)
+ movl 24(%edx), %edi
+ sbbl %ebx, %edi
+ movl 28(%esi), %ebx
+ movl %eax, 20(%ecx)
+ movl 28(%edx), %eax
+ sbbl %ebx, %eax
+ movl 32(%esi), %ebx
+ movl %edi, 24(%ecx)
+ movl 32(%edx), %edi
+ sbbl %ebx, %edi
+ movl 36(%esi), %ebx
+ movl %eax, 28(%ecx)
+ movl 36(%edx), %eax
+ sbbl %ebx, %eax
+ movl 40(%esi), %ebx
+ movl %edi, 32(%ecx)
+ movl 40(%edx), %edi
+ sbbl %ebx, %edi
+ movl 44(%esi), %ebx
+ movl %eax, 36(%ecx)
+ movl 44(%edx), %eax
+ sbbl %ebx, %eax
+ movl 48(%esi), %ebx
+ movl %edi, 40(%ecx)
+ movl 48(%edx), %edi
+ sbbl %ebx, %edi
+ movl 52(%esi), %ebx
+ movl %eax, 44(%ecx)
+ movl 52(%edx), %eax
+ sbbl %ebx, %eax
+ movl 56(%esi), %ebx
+ movl %edi, 48(%ecx)
+ movl 56(%edx), %edi
+ sbbl %ebx, %edi
+ movl 60(%esi), %ebx
+ movl %eax, 52(%ecx)
+ movl 60(%edx), %eax
+ sbbl %ebx, %eax
+ movl 64(%esi), %ebx
+ movl %edi, 56(%ecx)
+ movl 64(%edx), %edi
+ sbbl %ebx, %edi
+ movl 68(%esi), %ebx
+ movl %eax, 60(%ecx)
+ movl 68(%edx), %eax
+ sbbl %ebx, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 72(%esi), %eax
+ movl %edi, 64(%ecx)
+ movl 72(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 76(%esi), %eax
+ movl 76(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 80(%esi), %eax
+ movl 80(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 84(%esi), %eax
+ movl 84(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 88(%esi), %eax
+ movl 88(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 92(%esi), %eax
+ movl 92(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 96(%esi), %eax
+ movl 96(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 100(%esi), %eax
+ movl 100(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 104(%esi), %eax
+ movl 104(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 108(%esi), %eax
+ movl 108(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 112(%esi), %eax
+ movl 112(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 116(%esi), %eax
+ movl 116(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 120(%esi), %eax
+ movl 120(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 124(%esi), %eax
+ movl 124(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 128(%esi), %eax
+ movl 128(%edx), %edi
+ sbbl %eax, %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 132(%esi), %eax
+ movl 132(%edx), %edx
+ sbbl %eax, %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 148(%esp), %ebp
+ jne .LBB269_1
+# BB#2:
+ movl $0, 76(%esp) # 4-byte Folded Spill
+ jmp .LBB269_3
+.LBB269_1:
+ movl 64(%ebp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+.LBB269_3:
+ testb %al, %al
+ jne .LBB269_4
+# BB#5:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ movl $0, %esi
+ jmp .LBB269_6
+.LBB269_4:
+ movl (%ebp), %esi
+ movl 4(%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+.LBB269_6:
+ jne .LBB269_7
+# BB#8:
+ movl $0, 40(%esp) # 4-byte Folded Spill
+ jmp .LBB269_9
+.LBB269_7:
+ movl 60(%ebp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+.LBB269_9:
+ jne .LBB269_10
+# BB#11:
+ movl $0, 36(%esp) # 4-byte Folded Spill
+ jmp .LBB269_12
+.LBB269_10:
+ movl 56(%ebp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+.LBB269_12:
+ jne .LBB269_13
+# BB#14:
+ movl $0, 32(%esp) # 4-byte Folded Spill
+ jmp .LBB269_15
+.LBB269_13:
+ movl 52(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+.LBB269_15:
+ jne .LBB269_16
+# BB#17:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ jmp .LBB269_18
+.LBB269_16:
+ movl 48(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB269_18:
+ jne .LBB269_19
+# BB#20:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB269_21
+.LBB269_19:
+ movl 44(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB269_21:
+ jne .LBB269_22
+# BB#23:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB269_24
+.LBB269_22:
+ movl 40(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB269_24:
+ jne .LBB269_25
+# BB#26:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB269_27
+.LBB269_25:
+ movl 36(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB269_27:
+ jne .LBB269_28
+# BB#29:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB269_30
+.LBB269_28:
+ movl 32(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB269_30:
+ jne .LBB269_31
+# BB#32:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB269_33
+.LBB269_31:
+ movl 28(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB269_33:
+ jne .LBB269_34
+# BB#35:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB269_36
+.LBB269_34:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB269_36:
+ jne .LBB269_37
+# BB#38:
+ movl $0, %ebx
+ jmp .LBB269_39
+.LBB269_37:
+ movl 20(%ebp), %ebx
+.LBB269_39:
+ jne .LBB269_40
+# BB#41:
+ movl $0, %edi
+ jmp .LBB269_42
+.LBB269_40:
+ movl 16(%ebp), %edi
+.LBB269_42:
+ jne .LBB269_43
+# BB#44:
+ movl %ebp, %eax
+ movl $0, %ebp
+ jmp .LBB269_45
+.LBB269_43:
+ movl %ebp, %eax
+ movl 12(%eax), %ebp
+.LBB269_45:
+ jne .LBB269_46
+# BB#47:
+ xorl %eax, %eax
+ jmp .LBB269_48
+.LBB269_46:
+ movl 8(%eax), %eax
+.LBB269_48:
+ addl 52(%esp), %esi # 4-byte Folded Reload
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 68(%ecx)
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 72(%ecx)
+ adcl 56(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 76(%ecx)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %ebp, 80(%ecx)
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 84(%ecx)
+ movl (%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 88(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl 8(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 96(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 100(%ecx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 104(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 108(%ecx)
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 112(%ecx)
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 116(%ecx)
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 120(%ecx)
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 124(%ecx)
+ movl %eax, 128(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 132(%ecx)
+ addl $116, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end269:
+ .size mcl_fpDbl_sub17L, .Lfunc_end269-mcl_fpDbl_sub17L
+
+
+ .section ".note.GNU-stack","",@progbits